diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 654c3c0a34..e10a032681 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -1,5 +1,6 @@ name: CI and Release on: + merge_group: push: branches: - main @@ -24,7 +25,7 @@ on: jobs: # Dockerfile Linting hadolint: - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.2.0 # yamllint disable-line rule:line-length + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.2.2 # yamllint disable-line rule:line-length with: dockerfile: Dockerfile @@ -32,7 +33,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: celestiaorg/.github/.github/actions/yamllint@v0.2.0 + - uses: celestiaorg/.github/.github/actions/yamllint@v0.2.2 markdown-lint: name: Markdown Lint @@ -58,7 +59,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Version Release - uses: celestiaorg/.github/.github/actions/version-release@v0.2.0 + uses: celestiaorg/.github/.github/actions/version-release@v0.2.2 with: github-token: ${{secrets.GITHUB_TOKEN}} version-bump: ${{inputs.version}} diff --git a/.github/workflows/docker-build-publish.yml b/.github/workflows/docker-build-publish.yml index 0ddebbd750..2ce01c81df 100644 --- a/.github/workflows/docker-build-publish.yml +++ b/.github/workflows/docker-build-publish.yml @@ -2,6 +2,7 @@ name: Docker Build & Publish # Trigger on all push events, new semantic version tags, and all PRs on: + merge_group: push: branches: - "**" @@ -17,6 +18,6 @@ jobs: permissions: contents: write packages: write - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.0 # yamllint disable-line rule:line-length + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.2 # yamllint disable-line rule:line-length with: dockerfile: Dockerfile diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index e6c4e5881b..41b3a82994 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -4,7 +4,7 @@ on: workflow_call: env: - GO_VERSION: '1.20' + GO_VERSION: '1.21' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -25,7 +25,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v3.6.0 with: - version: v1.52.2 + version: v1.54.2 go_mod_tidy_check: name: Go Mod Tidy Check diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index bed2b3352c..b9d4351bbd 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -16,4 +16,4 @@ jobs: with: mode: minimum count: 1 - labels: "kind:fix, kind:misc, kind:break!, kind:refactor, kind:feat, kind:deps, kind:docs, kind:ci, kind:chore" # yamllint disable-line rule:line-length + labels: "kind:fix, kind:misc, kind:break!, kind:refactor, kind:feat, kind:deps, kind:docs, kind:ci, kind:chore, kind:testing" # yamllint disable-line rule:line-length diff --git a/.golangci.yml b/.golangci.yml index 5f7b13e6a5..a0f2754a9b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,7 @@ run: linters: enable: - bodyclose - - depguard + # - depguard as of v1.54.2, the default config throws errors on our repo - dogsled - dupl - errcheck diff --git a/Dockerfile b/Dockerfile index 9346c559d1..c8e9bb2e1c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/golang:1.20-alpine3.17 as builder +FROM docker.io/golang:1.21-alpine3.18 as builder # hadolint ignore=DL3018 RUN apk update && apk add --no-cache \ @@ -15,7 +15,7 @@ 
COPY . . RUN make build && make cel-key -FROM docker.io/alpine:3.18.0 +FROM docker.io/alpine:3.18.2 # Read here why UID 10001: https://github.com/hexops/dockerfile/blob/main/README.md#do-not-use-a-uid-below-10000 ARG UID=10001 diff --git a/Makefile b/Makefile index 8f4d1b42a9..feef6172aa 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ SHELL=/usr/bin/env bash PROJECTNAME=$(shell basename "$(PWD)") -LDFLAGS=-ldflags="-X 'main.buildTime=$(shell date)' -X 'main.lastCommit=$(shell git rev-parse HEAD)' -X 'main.semanticVersion=$(shell git describe --tags --dirty=-dev)'" +versioningPath := "github.com/celestiaorg/celestia-node/nodebuilder/node" +LDFLAGS=-ldflags="-X '$(versioningPath).buildTime=$(shell date)' -X '$(versioningPath).lastCommit=$(shell git rev-parse HEAD)' -X '$(versioningPath).semanticVersion=$(shell git describe --tags --dirty=-dev 2>/dev/null || git rev-parse --abbrev-ref HEAD)'" ifeq (${PREFIX},) PREFIX := /usr/local endif @@ -81,7 +82,7 @@ install-key: fmt: sort-imports @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/celestiaorg - @go mod tidy -compat=1.17 + @go mod tidy -compat=1.20 @cfmt -w -m=100 ./... @markdownlint --fix --quiet --config .markdownlint.yaml . .PHONY: fmt @@ -136,13 +137,14 @@ PB_PKGS=$(shell find . -name 'pb' -type d) PB_CORE=$(shell go list -f {{.Dir}} -m github.com/tendermint/tendermint) PB_GOGO=$(shell go list -f {{.Dir}} -m github.com/gogo/protobuf) PB_CELESTIA_APP=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/celestia-app) +PB_NMT=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/nmt) ## pb-gen: Generate protobuf code for all /pb/*.proto files in the project. pb-gen: @echo '--> Generating protobuf' @for dir in $(PB_PKGS); \ do for file in `find $$dir -type f -name "*.proto"`; \ - do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_GOGO} -I=${PB_CELESTIA_APP}/proto --gogofaster_out=paths=source_relative:. $$file; \ + do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_GOGO} -I=${PB_CELESTIA_APP}/proto -I=${PB_NMT} --gogofaster_out=paths=source_relative:. $$file; \ echo '-->' $$file; \ done; \ done; @@ -159,14 +161,14 @@ openrpc-gen: lint-imports: @echo "--> Running imports linter" @for file in `find . -type f -name '*.go'`; \ - do goimports-reviser -list-diff -set-exit-status -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/celestia-node" -output stdout $$file \ + do goimports-reviser -list-diff -set-exit-status -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/"$(PROJECTNAME)"" -output stdout $$file \ || exit 1; \ done; .PHONY: lint-imports ## sort-imports: Sort Go imports. sort-imports: - @goimports-reviser -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/celestia-node" -output stdout ./... + @goimports-reviser -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/"$(PROJECTNAME)"" -output stdout . .PHONY: sort-imports ## adr-gen: Generate ADR from template. Must set NUM and TITLE parameters. 
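A note on the Makefile change above: the `-X` flags now inject build metadata into `$(versioningPath)` (`github.com/celestiaorg/celestia-node/nodebuilder/node`) instead of `main`, and `semanticVersion` now falls back to the current branch name when `git describe --tags` fails (for example in a shallow clone with no tags). For `-X` to take effect, the target package only needs ordinary string variables with matching names; below is a minimal sketch of that shape, with a hypothetical `BuildInfo` accessor added purely for illustration (the real layout of `nodebuilder/node` may differ):

```go
// Minimal sketch of what the -X link flags above rely on: the package at
// $(versioningPath) must declare plain string variables with exactly these
// names so the linker can overwrite them at build time.
package node

import "fmt"

var (
	buildTime       string // set via -X '<versioningPath>.buildTime=$(shell date)'
	lastCommit      string // set via -X '<versioningPath>.lastCommit=$(shell git rev-parse HEAD)'
	semanticVersion string // set via -X '<versioningPath>.semanticVersion=...' (tag or branch fallback)
)

// BuildInfo is a hypothetical accessor that reports the injected values.
func BuildInfo() string {
	return fmt.Sprintf("semantic version: %s, commit: %s, build time: %s",
		semanticVersion, lastCommit, buildTime)
}
```

Because `-X` can only overwrite uninitialized (or constant-initialized) string variables, keeping these as bare `string` declarations is what makes the link-time injection work.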
diff --git a/README.md b/README.md index 76c722275d..0711c2d223 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ Continue reading [here](https://blog.celestia.org/celestia-mvp-release-data-avai | Requirement | Notes | | ----------- |----------------| -| Go version | 1.20 or higher | +| Go version | 1.21 or higher | ## System Requirements diff --git a/api/docgen/examples.go b/api/docgen/examples.go index 80a8c64d93..b873e7e050 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -56,7 +56,7 @@ var ExampleValues = map[reflect.Type]interface{}{ reflect.TypeOf(node.Full): node.Full, reflect.TypeOf(auth.Permission("admin")): auth.Permission("admin"), reflect.TypeOf(byzantine.BadEncoding): byzantine.BadEncoding, - reflect.TypeOf((*fraud.Proof)(nil)).Elem(): byzantine.CreateBadEncodingProof( + reflect.TypeOf((*fraud.Proof[*header.ExtendedHeader])(nil)).Elem(): byzantine.CreateBadEncodingProof( []byte("bad encoding proof"), 42, &byzantine.ErrByzantine{ @@ -133,13 +133,13 @@ func init() { } addToExampleValues(addrInfo) - namespace, err := share.NewNamespaceV0([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}) + namespace, err := share.NewBlobNamespaceV0([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}) if err != nil { panic(err) } addToExampleValues(namespace) - generatedBlob, err := blob.NewBlob(0, namespace, []byte("This is an example of some blob data")) + generatedBlob, err := blob.NewBlobV0(namespace, []byte("This is an example of some blob data")) if err != nil { panic(err) } diff --git a/api/gateway/endpoints.go b/api/gateway/endpoints.go index 0ae93b112c..9600138909 100644 --- a/api/gateway/endpoints.go +++ b/api/gateway/endpoints.go @@ -33,13 +33,13 @@ func (h *Handler) RegisterEndpoints(rpc *Server, deprecatedEndpointsEnabled bool rpc.RegisterHandlerFunc(submitTxEndpoint, h.handleSubmitTx, http.MethodPost) // share endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, nIDKey, heightKey), + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, namespaceKey, heightKey), h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, nIDKey), + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, nIDKey, heightKey), + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), h.handleDataByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, nIDKey), + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), h.handleDataByNamespaceRequest, http.MethodGet) // DAS endpoints diff --git a/api/gateway/middleware.go b/api/gateway/middleware.go index 498b9c5d64..2c88b34185 100644 --- a/api/gateway/middleware.go +++ b/api/gateway/middleware.go @@ -18,9 +18,17 @@ func (h *Handler) RegisterMiddleware(srv *Server) { setContentType, checkPostDisabled(h.state), wrapRequestContext, + enableCors, ) } +func enableCors(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + next.ServeHTTP(w, r) + }) +} + func setContentType(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 
w.Header().Add("Content-Type", "application/json") diff --git a/api/gateway/server_test.go b/api/gateway/server_test.go index e98d7a6091..cb8e3d17ae 100644 --- a/api/gateway/server_test.go +++ b/api/gateway/server_test.go @@ -12,8 +12,12 @@ import ( "github.com/stretchr/testify/require" ) +const ( + address = "localhost" + port = "0" +) + func TestServer(t *testing.T) { - address, port := "localhost", "0" server := NewServer(address, port) ctx, cancel := context.WithCancel(context.Background()) @@ -42,10 +46,33 @@ func TestServer(t *testing.T) { require.NoError(t, err) } +func TestCorsEnabled(t *testing.T) { + server := NewServer(address, port) + server.RegisterMiddleware(enableCors) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + err := server.Start(ctx) + require.NoError(t, err) + + // register ping handler + ping := new(ping) + server.RegisterHandlerFunc("/ping", ping.ServeHTTP, http.MethodGet) + + url := fmt.Sprintf("http://%s/ping", server.ListenAddr()) + + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + + require.NoError(t, err) + require.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), "*") +} + // TestServer_contextLeakProtection tests to ensure a context // deadline was added by the context wrapper middleware server-side. func TestServer_contextLeakProtection(t *testing.T) { - address, port := "localhost", "0" server := NewServer(address, port) server.RegisterMiddleware(wrapRequestContext) diff --git a/api/gateway/share.go b/api/gateway/share.go index db5ed37286..c9dec071f3 100644 --- a/api/gateway/share.go +++ b/api/gateway/share.go @@ -10,7 +10,6 @@ import ( "github.com/gorilla/mux" "github.com/celestiaorg/celestia-app/pkg/shares" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/celestia-node/share" ) @@ -20,7 +19,7 @@ const ( namespacedDataEndpoint = "/namespaced_data" ) -var nIDKey = "nid" +var namespaceKey = "nid" // NamespacedSharesResponse represents the response to a // SharesByNamespace request. @@ -37,12 +36,12 @@ type NamespacedDataResponse struct { } func (h *Handler) handleSharesByNamespaceRequest(w http.ResponseWriter, r *http.Request) { - height, nID, err := parseGetByNamespaceArgs(r) + height, namespace, err := parseGetByNamespaceArgs(r) if err != nil { writeError(w, http.StatusBadRequest, namespacedSharesEndpoint, err) return } - shares, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, namespace) if err != nil { writeError(w, http.StatusInternalServerError, namespacedSharesEndpoint, err) return @@ -62,12 +61,12 @@ func (h *Handler) handleSharesByNamespaceRequest(w http.ResponseWriter, r *http. 
} func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Request) { - height, nID, err := parseGetByNamespaceArgs(r) + height, namespace, err := parseGetByNamespaceArgs(r) if err != nil { writeError(w, http.StatusBadRequest, namespacedDataEndpoint, err) return } - shares, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, namespace) if err != nil { writeError(w, http.StatusInternalServerError, namespacedDataEndpoint, err) return @@ -91,13 +90,13 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re } } -func (h *Handler) getShares(ctx context.Context, height uint64, nID namespace.ID) ([]share.Share, error) { +func (h *Handler) getShares(ctx context.Context, height uint64, namespace share.Namespace) ([]share.Share, error) { header, err := h.header.GetByHeight(ctx, height) if err != nil { return nil, err } - shares, err := h.share.GetSharesByNamespace(ctx, header.DAH, nID) + shares, err := h.share.GetSharesByNamespace(ctx, header.DAH, namespace) if err != nil { return nil, err } @@ -124,7 +123,7 @@ func dataFromShares(input []share.Share) (data [][]byte, err error) { return data, nil } -func parseGetByNamespaceArgs(r *http.Request) (height uint64, nID namespace.ID, err error) { +func parseGetByNamespaceArgs(r *http.Request) (height uint64, namespace share.Namespace, err error) { vars := mux.Vars(r) // if a height was given, parse it, otherwise get namespaced shares/data from the latest header if strHeight, ok := vars[heightKey]; ok { @@ -133,11 +132,10 @@ func parseGetByNamespaceArgs(r *http.Request) (height uint64, nID namespace.ID, return 0, nil, err } } - hexNID := vars[nIDKey] - nID, err = hex.DecodeString(hexNID) + hexNamespace := vars[namespaceKey] + namespace, err = hex.DecodeString(hexNamespace) if err != nil { return 0, nil, err } - - return height, nID, nil + return height, namespace, namespace.ValidateForData() } diff --git a/api/gateway/share_test.go b/api/gateway/share_test.go index 16cf606680..9b12240f62 100644 --- a/api/gateway/share_test.go +++ b/api/gateway/share_test.go @@ -8,8 +8,9 @@ import ( coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/celestia-app/pkg/shares" + + "github.com/celestiaorg/celestia-node/share/sharetest" ) func Test_dataFromShares(t *testing.T) { @@ -19,13 +20,13 @@ func Test_dataFromShares(t *testing.T) { []byte("BEEEEAHP"), } - ns := namespace.RandomBlobNamespace() + ns := sharetest.RandV0Namespace() sss := shares.NewSparseShareSplitter() for _, data := range testData { b := coretypes.Blob{ Data: data, - NamespaceID: ns.ID, - NamespaceVersion: ns.Version, + NamespaceID: ns.ID(), + NamespaceVersion: ns.Version(), ShareVersion: appconsts.ShareVersionZero, } err := sss.Write(b) diff --git a/api/gateway/state.go b/api/gateway/state.go index b584b00d36..69900b0bfc 100644 --- a/api/gateway/state.go +++ b/api/gateway/state.go @@ -9,8 +9,6 @@ import ( "github.com/cosmos/cosmos-sdk/types" "github.com/gorilla/mux" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/state" ) @@ -131,7 +129,7 @@ func (h *Handler) handleSubmitPFB(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusBadRequest, submitPFBEndpoint, err) return } - nID, err := hex.DecodeString(req.NamespaceID) + namespace, err := hex.DecodeString(req.NamespaceID) if err != nil { 
writeError(w, http.StatusBadRequest, submitPFBEndpoint, err) return @@ -143,17 +141,22 @@ func (h *Handler) handleSubmitPFB(w http.ResponseWriter, r *http.Request) { } fee := types.NewInt(req.Fee) - constructedBlob, err := blob.NewBlob(appconsts.DefaultShareVersion, nID, data) + constructedBlob, err := blob.NewBlobV0(namespace, data) if err != nil { writeError(w, http.StatusBadRequest, submitPFBEndpoint, err) return } // perform request - txResp, txerr := h.state.SubmitPayForBlob(r.Context(), fee, req.GasLimit, []*blob.Blob{constructedBlob}) - if txerr != nil && txResp == nil { - // no tx data to return - writeError(w, http.StatusInternalServerError, submitPFBEndpoint, err) + txResp, err := h.state.SubmitPayForBlob(r.Context(), fee, req.GasLimit, []*blob.Blob{constructedBlob}) + if err != nil { + if txResp == nil { + // no tx data to return + writeError(w, http.StatusBadRequest, submitPFBEndpoint, err) + return + } + // if error returned, change status from 200 to 206 + w.WriteHeader(http.StatusPartialContent) } bs, err := json.Marshal(&txResp) @@ -162,10 +165,6 @@ func (h *Handler) handleSubmitPFB(w http.ResponseWriter, r *http.Request) { return } - // if error returned, change status from 200 to 206 - if txerr != nil { - w.WriteHeader(http.StatusPartialContent) - } _, err = w.Write(bs) if err != nil { log.Errorw("writing response", "endpoint", submitPFBEndpoint, "err", err) diff --git a/api/gateway/state_test.go b/api/gateway/state_test.go index a613471a04..aa9196cc8d 100644 --- a/api/gateway/state_test.go +++ b/api/gateway/state_test.go @@ -34,7 +34,7 @@ func TestHandleSubmitPFB(t *testing.T) { mock.EXPECT().SubmitPayForBlob(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(&txResponse, timedErr) - ns, err := share.NewNamespaceV0([]byte("abc")) + ns, err := share.NewBlobNamespaceV0([]byte("abc")) require.NoError(t, err) hexNs := hex.EncodeToString(ns[:]) diff --git a/api/rpc/server.go b/api/rpc/server.go index a4c8c21ce7..3357140e68 100644 --- a/api/rpc/server.go +++ b/api/rpc/server.go @@ -2,7 +2,6 @@ package rpc import ( "context" - "encoding/json" "net" "net/http" "reflect" @@ -15,6 +14,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/libs/authtoken" ) var log = logging.Logger("rpc") @@ -51,17 +51,7 @@ func NewServer(address, port string, secret jwt.Signer) *Server { // reached if a token is provided in the header of the request, otherwise only // methods with `read` permissions are accessible. func (s *Server) verifyAuth(_ context.Context, token string) ([]auth.Permission, error) { - tk, err := jwt.ParseAndVerifyString(token, s.auth) - if err != nil { - return nil, err - } - p := new(perms.JWTPayload) - err = json.Unmarshal(tk.RawClaims(), p) - if err != nil { - return nil, err - } - // check permissions - return p.Allow, nil + return authtoken.ExtractSignedPermissions(s.auth, token) } // RegisterService registers a service onto the RPC server. 
All methods on the service will then be diff --git a/blob/blob.go b/blob/blob.go index 9771714cb9..e9ad2b6255 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -5,11 +5,13 @@ import ( "encoding/json" "fmt" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -101,41 +103,51 @@ type Blob struct { types.Blob `json:"blob"` Commitment Commitment `json:"commitment"` + + // the celestia-node's namespace type + // this is to avoid converting to and from app's type + namespace share.Namespace } -// NewBlob constructs a new blob from the provided namespace.ID and data. -func NewBlob(shareVersion uint8, namespace namespace.ID, data []byte) (*Blob, error) { - if len(namespace) != appns.NamespaceSize { - return nil, fmt.Errorf("invalid size of the namespace id. got:%d, want:%d", len(namespace), appns.NamespaceSize) - } +// NewBlobV0 constructs a new blob from the provided Namespace and data. +// The blob will be formatted as v0 shares. +func NewBlobV0(namespace share.Namespace, data []byte) (*Blob, error) { + return NewBlob(appconsts.ShareVersionZero, namespace, data) +} - ns, err := appns.New(namespace[appns.NamespaceVersionSize-1], namespace[appns.NamespaceVersionSize:]) - if err != nil { +// NewBlob constructs a new blob from the provided Namespace, data and share version. +func NewBlob(shareVersion uint8, namespace share.Namespace, data []byte) (*Blob, error) { + if len(data) == 0 || len(data) > appconsts.DefaultMaxBytes { + return nil, fmt.Errorf("blob data must be > 0 && <= %d, but it was %d bytes", appconsts.DefaultMaxBytes, len(data)) + } + if err := namespace.ValidateForBlob(); err != nil { return nil, err } - blob, err := types.NewBlob(ns, data, shareVersion) - if err != nil { - return nil, err + blob := tmproto.Blob{ + NamespaceId: namespace.ID(), + Data: data, + ShareVersion: uint32(shareVersion), + NamespaceVersion: uint32(namespace.Version()), } - com, err := types.CreateCommitment(blob) + com, err := types.CreateCommitment(&blob) if err != nil { return nil, err } - return &Blob{Blob: *blob, Commitment: com}, nil + return &Blob{Blob: blob, Commitment: com, namespace: namespace}, nil } // Namespace returns blob's namespace. -func (b *Blob) Namespace() namespace.ID { - return append([]byte{uint8(b.NamespaceVersion)}, b.NamespaceId...) 
+func (b *Blob) Namespace() share.Namespace { + return b.namespace } type jsonBlob struct { - Namespace namespace.ID `json:"namespace"` - Data []byte `json:"data"` - ShareVersion uint32 `json:"share_version"` - Commitment Commitment `json:"commitment"` + Namespace share.Namespace `json:"namespace"` + Data []byte `json:"data"` + ShareVersion uint32 `json:"share_version"` + Commitment Commitment `json:"commitment"` } func (b *Blob) MarshalJSON() ([]byte, error) { @@ -155,10 +167,11 @@ func (b *Blob) UnmarshalJSON(data []byte) error { return err } - b.Blob.NamespaceVersion = uint32(blob.Namespace[0]) - b.Blob.NamespaceId = blob.Namespace[1:] + b.Blob.NamespaceVersion = uint32(blob.Namespace.Version()) + b.Blob.NamespaceId = blob.Namespace.ID() b.Blob.Data = blob.Data b.Blob.ShareVersion = blob.ShareVersion b.Commitment = blob.Commitment + b.namespace = blob.Namespace return nil } diff --git a/blob/blob_test.go b/blob/blob_test.go index 3aabd6559b..85486ad125 100644 --- a/blob/blob_test.go +++ b/blob/blob_test.go @@ -8,14 +8,13 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/types" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" apptypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/celestia-node/blob/blobtest" ) func TestBlob(t *testing.T) { - appBlobs, err := blobtest.GenerateBlobs([]int{1}, false) + appBlobs, err := blobtest.GenerateV0Blobs([]int{1}, false) require.NoError(t, err) blob, err := convertBlobs(appBlobs...) require.NoError(t, err) @@ -42,12 +41,9 @@ func TestBlob(t *testing.T) { }, }, { - name: "verify nID", + name: "verify namespace", expectedRes: func(t *testing.T) { - ns, err := appns.New( - blob[0].Namespace()[appns.NamespaceVersionSize-1], - blob[0].Namespace()[appns.NamespaceVersionSize:], - ) + ns := blob[0].Namespace().ToAppNamespace() require.NoError(t, err) require.NoError(t, apptypes.ValidateBlobNamespace(ns)) }, diff --git a/blob/blobtest/testing.go b/blob/blobtest/testing.go index 395ef4167a..a22f22f790 100644 --- a/blob/blobtest/testing.go +++ b/blob/blobtest/testing.go @@ -11,14 +11,16 @@ import ( "github.com/celestiaorg/celestia-node/share" ) -func GenerateBlobs(sizes []int, sameNID bool) ([]types.Blob, error) { +// GenerateV0Blobs is a test utility producing v0 share formatted blobs with the +// requested size and random namespaces. 
+func GenerateV0Blobs(sizes []int, sameNamespace bool) ([]types.Blob, error) { blobs := make([]types.Blob, 0, len(sizes)) for _, size := range sizes { size := rawBlobSize(appconsts.FirstSparseShareContentSize * size) appBlob := testfactory.GenerateRandomBlob(size) - if !sameNID { - nid, err := share.NewNamespaceV0(tmrand.Bytes(7)) + if !sameNamespace { + nid, err := share.NewBlobNamespaceV0(tmrand.Bytes(7)) if err != nil { return nil, err } diff --git a/blob/helper.go b/blob/helper.go index 1fef41dc22..5627fac998 100644 --- a/blob/helper.go +++ b/blob/helper.go @@ -6,7 +6,6 @@ import ( "github.com/tendermint/tendermint/types" - "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-node/share" @@ -19,8 +18,8 @@ func SharesToBlobs(rawShares []share.Share) ([]*Blob, error) { } appShares := make([]shares.Share, 0, len(rawShares)) - for _, sh := range rawShares { - bShare, err := shares.NewShare(sh) + for _, shr := range rawShares { + bShare, err := shares.NewShare(shr) if err != nil { return nil, err } @@ -32,6 +31,11 @@ func SharesToBlobs(rawShares []share.Share) ([]*Blob, error) { return nil, err } + // ensure that sequence length is not 0 + if len(shareSequences) == 0 { + return nil, ErrBlobNotFound + } + blobs := make([]*Blob, len(shareSequences)) for i, sequence := range shareSequences { data, err := sequence.RawData() @@ -74,29 +78,13 @@ func BlobsToShares(blobs ...*Blob) ([]share.Share, error) { return val <= 0 }) - rawShares, err := shares.SplitBlobs(0, nil, b, false) + rawShares, err := shares.SplitBlobs(b...) if err != nil { return nil, err } return shares.ToBytes(rawShares), nil } -const ( - perByteGasTolerance = 2 - pfbGasFixedCost = 80000 -) - -// estimateGas estimates the gas required to pay for a set of blobs in a PFB. -func estimateGas(blobs ...*Blob) uint64 { - totalByteCount := 0 - for _, blob := range blobs { - totalByteCount += len(blob.Data) + appconsts.NamespaceSize - } - variableGasAmount := (appconsts.DefaultGasPerBlobByte + perByteGasTolerance) * totalByteCount - - return uint64(variableGasAmount + pfbGasFixedCost) -} - // constructAndVerifyBlob reconstruct a Blob from the passed shares and compares commitments. func constructAndVerifyBlob(sh []share.Share, commitment Commitment) (*Blob, bool, error) { blob, err := SharesToBlobs(sh) diff --git a/blob/service.go b/blob/service.go index 0b43493e27..da14fc06c7 100644 --- a/blob/service.go +++ b/blob/service.go @@ -10,9 +10,7 @@ import ( "github.com/cosmos/cosmos-sdk/types" logging "github.com/ipfs/go-log/v2" - "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/shares" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" @@ -37,7 +35,7 @@ type Service struct { blobSumitter Submitter // shareGetter retrieves the EDS to fetch all shares from the requested header. shareGetter share.Getter - // headerGetter fetches header by the provided height + // headerGetter fetches header by the provided height headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error) } @@ -56,15 +54,11 @@ func NewService( // Submit sends PFB transaction and reports the height in which it was included. // Allows sending multiple Blobs atomically synchronously. // Uses default wallet registered on the Node. +// Handles gas estimation and fee calculation. 
func (s *Service) Submit(ctx context.Context, blobs []*Blob) (uint64, error) { log.Debugw("submitting blobs", "amount", len(blobs)) - var ( - gasLimit = estimateGas(blobs...) - fee = int64(appconsts.DefaultMinGasPrice * float64(gasLimit)) - ) - - resp, err := s.blobSumitter.SubmitPayForBlob(ctx, types.NewInt(fee), gasLimit, blobs) + resp, err := s.blobSumitter.SubmitPayForBlob(ctx, types.OneInt().Neg(), 0, blobs) if err != nil { return 0, err } @@ -72,8 +66,8 @@ func (s *Service) Submit(ctx context.Context, blobs []*Blob) (uint64, error) { } // Get retrieves all the blobs for given namespaces at the given height by commitment. -func (s *Service) Get(ctx context.Context, height uint64, nID namespace.ID, commitment Commitment) (*Blob, error) { - blob, _, err := s.getByCommitment(ctx, height, nID, commitment) +func (s *Service) Get(ctx context.Context, height uint64, ns share.Namespace, commitment Commitment) (*Blob, error) { + blob, _, err := s.getByCommitment(ctx, height, ns, commitment) if err != nil { return nil, err } @@ -85,10 +79,10 @@ func (s *Service) Get(ctx context.Context, height uint64, nID namespace.ID, comm func (s *Service) GetProof( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, commitment Commitment, ) (*Proof, error) { - _, proof, err := s.getByCommitment(ctx, height, nID, commitment) + _, proof, err := s.getByCommitment(ctx, height, namespace, commitment) if err != nil { return nil, err } @@ -97,29 +91,29 @@ func (s *Service) GetProof( // GetAll returns all blobs under the given namespaces at the given height. // GetAll can return blobs and an error in case if some requests failed. -func (s *Service) GetAll(ctx context.Context, height uint64, nIDs []namespace.ID) ([]*Blob, error) { +func (s *Service) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*Blob, error) { header, err := s.headerGetter(ctx, height) if err != nil { return nil, err } var ( - resultBlobs = make([][]*Blob, len(nIDs)) - resultErr = make([]error, len(nIDs)) + resultBlobs = make([][]*Blob, len(namespaces)) + resultErr = make([]error, len(namespaces)) ) wg := sync.WaitGroup{} - for i, nID := range nIDs { + for i, namespace := range namespaces { wg.Add(1) - go func(i int, nID namespace.ID) { + go func(i int, namespace share.Namespace) { defer wg.Done() - blobs, err := s.getBlobs(ctx, nID, header.DAH) + blobs, err := s.getBlobs(ctx, namespace, header.DAH) if err != nil { - resultErr[i] = fmt.Errorf("getting blobs for nID(%s): %s", nID.String(), err) + resultErr[i] = fmt.Errorf("getting blobs for namespace(%s): %s", namespace.String(), err) return } resultBlobs[i] = blobs - }(i, nID) + }(i, namespace) } wg.Wait() @@ -143,7 +137,7 @@ func (s *Service) GetAll(ctx context.Context, height uint64, nIDs []namespace.ID func (s *Service) Included( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, proof *Proof, com Commitment, ) (bool, error) { @@ -156,7 +150,7 @@ func (s *Service) Included( // but we have to guarantee that all our stored subtree roots will be on the same height(e.g. one // level above shares). // TODO(@vgonkivs): rework the implementation to perform all verification without network requests. 
- _, resProof, err := s.getByCommitment(ctx, height, nID, com) + _, resProof, err := s.getByCommitment(ctx, height, namespace, com) switch err { case nil: case ErrBlobNotFound: @@ -172,12 +166,12 @@ func (s *Service) Included( func (s *Service) getByCommitment( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, commitment Commitment, ) (*Blob, *Proof, error) { log.Infow("requesting blob", "height", height, - "nID", nID.String()) + "namespace", namespace.String()) header, err := s.headerGetter(ctx, height) if err != nil { @@ -191,10 +185,9 @@ func (s *Service) getByCommitment( blobShare *shares.Share ) - namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header.DAH, nID) + namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header.DAH, namespace) if err != nil { - if errors.Is(err, share.ErrNamespaceNotFound) || - errors.Is(err, share.ErrNotFound) { + if errors.Is(err, share.ErrNotFound) { err = ErrBlobNotFound } return nil, nil, err @@ -210,8 +203,8 @@ func (s *Service) getByCommitment( // reconstruct the `blobShare` from the first rawShare in range // in order to get blob's length(first share will contain this info) if blobShare == nil { - for i, sh := range rawShares { - bShare, err := shares.NewShare(sh) + for i, shr := range rawShares { + bShare, err := shares.NewShare(shr) if err != nil { return nil, nil, err } @@ -279,10 +272,10 @@ func (s *Service) getByCommitment( return nil, nil, ErrBlobNotFound } -// getBlobs retrieves the DAH and fetches all shares from the requested namespace.ID and converts +// getBlobs retrieves the DAH and fetches all shares from the requested Namespace and converts // them to Blobs. -func (s *Service) getBlobs(ctx context.Context, nID namespace.ID, root *share.Root) ([]*Blob, error) { - namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, root, nID) +func (s *Service) getBlobs(ctx context.Context, namespace share.Namespace, root *share.Root) ([]*Blob, error) { + namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, root, namespace) if err != nil { return nil, err } diff --git a/blob/service_test.go b/blob/service_test.go index ee6e982fe8..1dcabe7129 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -15,16 +15,15 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/go-header/store" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/celestia-node/blob/blobtest" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" ) func TestBlobService_Get(t *testing.T) { @@ -37,12 +36,12 @@ func TestBlobService_Get(t *testing.T) { blobSize3 = 12 ) - appBlobs, err := blobtest.GenerateBlobs([]int{blobSize0, blobSize1}, false) + appBlobs, err := blobtest.GenerateV0Blobs([]int{blobSize0, blobSize1}, false) require.NoError(t, err) blobs0, err := convertBlobs(appBlobs...) require.NoError(t, err) - appBlobs, err = blobtest.GenerateBlobs([]int{blobSize2, blobSize3}, true) + appBlobs, err = blobtest.GenerateV0Blobs([]int{blobSize2, blobSize3}, true) require.NoError(t, err) blobs1, err := convertBlobs(appBlobs...) 
require.NoError(t, err) @@ -71,9 +70,9 @@ func TestBlobService_Get(t *testing.T) { }, }, { - name: "get all with the same nID", + name: "get all with the same namespace", doFn: func() (interface{}, error) { - b, err := service.GetAll(ctx, 1, []namespace.ID{blobs1[0].Namespace()}) + b, err := service.GetAll(ctx, 1, []share.Namespace{blobs1[0].Namespace()}) return b, err }, expectedResult: func(res interface{}, err error) { @@ -91,9 +90,9 @@ func TestBlobService_Get(t *testing.T) { }, }, { - name: "get all with different nIDs", + name: "get all with different namespaces", doFn: func() (interface{}, error) { - b, err := service.GetAll(ctx, 1, []namespace.ID{blobs0[0].Namespace(), blobs0[1].Namespace()}) + b, err := service.GetAll(ctx, 1, []share.Namespace{blobs0[0].Namespace(), blobs0[1].Namespace()}) return b, err }, expectedResult: func(res interface{}, err error) { @@ -126,7 +125,7 @@ func TestBlobService_Get(t *testing.T) { { name: "get invalid blob", doFn: func() (interface{}, error) { - appBlob, err := blobtest.GenerateBlobs([]int{10}, false) + appBlob, err := blobtest.GenerateV0Blobs([]int{10}, false) require.NoError(t, err) blob, err := convertBlobs(appBlob...) require.NoError(t, err) @@ -157,13 +156,13 @@ func TestBlobService_Get(t *testing.T) { proof, ok := res.(*Proof) assert.True(t, ok) - verifyFn := func(t *testing.T, rawShares [][]byte, proof *Proof, nID namespace.ID) { + verifyFn := func(t *testing.T, rawShares [][]byte, proof *Proof, namespace share.Namespace) { for _, row := range header.DAH.RowRoots { to := 0 for _, p := range *proof { from := to to = p.End() - p.Start() + from - eq := p.VerifyInclusion(sha256.New(), nID, rawShares[from:to], row) + eq := p.VerifyInclusion(sha256.New(), namespace.ToNMT(), rawShares[from:to], row) if eq == true { return } @@ -209,7 +208,7 @@ func TestBlobService_Get(t *testing.T) { { name: "not included", doFn: func() (interface{}, error) { - appBlob, err := blobtest.GenerateBlobs([]int{10}, false) + appBlob, err := blobtest.GenerateV0Blobs([]int{10}, false) require.NoError(t, err) blob, err := convertBlobs(appBlob...) require.NoError(t, err) @@ -256,8 +255,8 @@ func TestBlobService_Get(t *testing.T) { { name: "get all not found", doFn: func() (interface{}, error) { - nID := tmrand.Bytes(appconsts.NamespaceSize) - return service.GetAll(ctx, 1, []namespace.ID{nID}) + namespace := share.Namespace(tmrand.Bytes(share.NamespaceSize)) + return service.GetAll(ctx, 1, []share.Namespace{namespace}) }, expectedResult: func(i interface{}, err error) { blobs, ok := i.([]*Blob) @@ -297,27 +296,23 @@ func TestBlobService_Get(t *testing.T) { } } -// TestService_GetSingleBlobWithoutPadding creates two blobs with the same nID +// TestService_GetSingleBlobWithoutPadding creates two blobs with the same namespace // But to satisfy the rule of eds creating, padding namespace share is placed between // blobs. Test ensures that blob service will skip padding share and return the correct blob. func TestService_GetSingleBlobWithoutPadding(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - appBlob, err := blobtest.GenerateBlobs([]int{9, 5}, true) + appBlob, err := blobtest.GenerateV0Blobs([]int{9, 5}, true) require.NoError(t, err) blobs, err := convertBlobs(appBlob...) 
require.NoError(t, err) - ns1, err := appns.New(blobs[0].Namespace()[0], blobs[0].Namespace()[appns.NamespaceVersionSize:]) - require.NoError(t, err) - - ns2, err := appns.New(blobs[1].Namespace()[0], blobs[1].Namespace()[appns.NamespaceVersionSize:]) - require.NoError(t, err) + ns1, ns2 := blobs[0].Namespace().ToAppNamespace(), blobs[1].Namespace().ToAppNamespace() - padding0, err := shares.NamespacePaddingShare(ns1) + padding0, err := shares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) require.NoError(t, err) - padding1, err := shares.NamespacePaddingShare(ns2) + padding1, err := shares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) require.NoError(t, err) rawShares0, err := BlobsToShares(blobs[0]) require.NoError(t, err) @@ -332,7 +327,7 @@ func TestService_GetSingleBlobWithoutPadding(t *testing.T) { batching := ds_sync.MutexWrap(ds.NewMapDatastore()) headerStore, err := store.NewStore[*header.ExtendedHeader](batching) require.NoError(t, err) - eds, err := share.AddShares(ctx, rawShares, bs) + eds, err := ipld.AddShares(ctx, rawShares, bs) require.NoError(t, err) h := headertest.ExtendedHeaderFromEDS(t, 1, eds) @@ -353,26 +348,16 @@ func TestService_GetAllWithoutPadding(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - appBlob, err := blobtest.GenerateBlobs([]int{9, 5}, true) + appBlob, err := blobtest.GenerateV0Blobs([]int{9, 5}, true) require.NoError(t, err) blobs, err := convertBlobs(appBlob...) require.NoError(t, err) - ns1, err := appns.New( - blobs[0].Namespace()[appns.NamespaceVersionSize-1], - blobs[0].Namespace()[appns.NamespaceVersionSize:], - ) - require.NoError(t, err) - - ns2, err := appns.New( - blobs[1].Namespace()[appns.NamespaceVersionSize-1], - blobs[1].Namespace()[appns.NamespaceVersionSize:], - ) - require.NoError(t, err) + ns1, ns2 := blobs[0].Namespace().ToAppNamespace(), blobs[1].Namespace().ToAppNamespace() - padding0, err := shares.NamespacePaddingShare(ns1) + padding0, err := shares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) require.NoError(t, err) - padding1, err := shares.NamespacePaddingShare(ns2) + padding1, err := shares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) require.NoError(t, err) rawShares0, err := BlobsToShares(blobs[0]) require.NoError(t, err) @@ -393,7 +378,7 @@ func TestService_GetAllWithoutPadding(t *testing.T) { batching := ds_sync.MutexWrap(ds.NewMapDatastore()) headerStore, err := store.NewStore[*header.ExtendedHeader](batching) require.NoError(t, err) - eds, err := share.AddShares(ctx, rawShares, bs) + eds, err := ipld.AddShares(ctx, rawShares, bs) require.NoError(t, err) h := headertest.ExtendedHeaderFromEDS(t, 1, eds) @@ -406,7 +391,7 @@ func TestService_GetAllWithoutPadding(t *testing.T) { service := NewService(nil, getters.NewIPLDGetter(bs), fn) - _, err = service.GetAll(ctx, 1, []namespace.ID{blobs[0].Namespace(), blobs[1].Namespace()}) + _, err = service.GetAll(ctx, 1, []share.Namespace{blobs[0].Namespace(), blobs[1].Namespace()}) require.NoError(t, err) } @@ -417,7 +402,7 @@ func createService(ctx context.Context, t *testing.T, blobs []*Blob) *Service { require.NoError(t, err) rawShares, err := BlobsToShares(blobs...) 
require.NoError(t, err) - eds, err := share.AddShares(ctx, rawShares, bs) + eds, err := ipld.AddShares(ctx, rawShares, bs) require.NoError(t, err) h := headertest.ExtendedHeaderFromEDS(t, 1, eds) diff --git a/cmd/cel-shed/eds_store_stress.go b/cmd/cel-shed/eds_store_stress.go new file mode 100644 index 0000000000..62ea5cb772 --- /dev/null +++ b/cmd/cel-shed/eds_store_stress.go @@ -0,0 +1,165 @@ +package main + +import ( + "context" + "errors" + _ "expvar" + "fmt" + "math" + "net/http" + "os" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" + "github.com/pyroscope-io/client/pyroscope" + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/libs/edssser" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +const ( + edsStorePathFlag = "path" + edsWritesFlag = "writes" + edsSizeFlag = "size" + edsDisableLogFlag = "disable-log" + edsLogStatFreqFlag = "log-stat-freq" + edsCleanupFlag = "cleanup" + edsFreshStartFlag = "fresh" + + pyroscopeEndpointFlag = "pyroscope" + putTimeoutFlag = "timeout" + badgerLogLevelFlag = "badger-log-level" +) + +func init() { + edsStoreCmd.AddCommand(edsStoreStress) + + defaultPath := "~/.edssser" + path, err := homedir.Expand(defaultPath) + if err != nil { + panic(err) + } + + pathFlagUsage := fmt.Sprintf("Directory path to use for stress test. Uses %s by default.", defaultPath) + edsStoreStress.Flags().String(edsStorePathFlag, path, pathFlagUsage) + edsStoreStress.Flags().String(pyroscopeEndpointFlag, "", + "Pyroscope address. If no address provided, pyroscope will be disabled") + edsStoreStress.Flags().Int(edsWritesFlag, math.MaxInt, "Total EDS writes to make. MaxInt by default.") + edsStoreStress.Flags().Int(edsSizeFlag, 128, "Chooses EDS size. 128 by default.") + edsStoreStress.Flags().Bool(edsDisableLogFlag, false, "Disables logging. Enabled by default.") + edsStoreStress.Flags().Int(edsLogStatFreqFlag, 10, "Write statistic logging frequency. 10 by default.") + edsStoreStress.Flags().Bool(edsCleanupFlag, false, "Cleans up the store on stop. Disabled by default.") + edsStoreStress.Flags().Bool(edsFreshStartFlag, false, "Cleanup previous state on start. Disabled by default.") + edsStoreStress.Flags().Int(putTimeoutFlag, 30, "Sets put timeout in seconds. 30 sec by default.") + edsStoreStress.Flags().String(badgerLogLevelFlag, "INFO", "Badger log level, Defaults to INFO") + + // kill redundant print + nodebuilder.PrintKeyringInfo = false +} + +var edsStoreCmd = &cobra.Command{ + Use: "eds-store [subcommand]", + Short: "Collection of eds-store related utilities", +} + +var edsStoreStress = &cobra.Command{ + Use: "stress", + Short: `Runs eds.Store stress test over default node.Store Datastore backend (e.g. 
Badger).`, + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) (err error) { + // expose expvar vars over http + go http.ListenAndServe(":9999", http.DefaultServeMux) //nolint:errcheck,gosec + + endpoint, _ := cmd.Flags().GetString(pyroscopeEndpointFlag) + if endpoint != "" { + _, err = pyroscope.Start(pyroscope.Config{ + ApplicationName: "cel-shred.stresser", + ServerAddress: endpoint, + ProfileTypes: []pyroscope.ProfileType{ + pyroscope.ProfileCPU, + pyroscope.ProfileAllocObjects, + pyroscope.ProfileAllocSpace, + pyroscope.ProfileInuseObjects, + pyroscope.ProfileInuseSpace, + }, + }) + if err != nil { + fmt.Printf("failed to launch pyroscope with addr: %s err: %s\n", endpoint, err.Error()) + } else { + fmt.Println("connected pyroscope to:", endpoint) + } + } + + path, _ := cmd.Flags().GetString(edsStorePathFlag) + fmt.Printf("using %s\n", path) + + freshStart, _ := cmd.Flags().GetBool(edsFreshStartFlag) + if freshStart { + err = os.RemoveAll(path) + if err != nil { + return err + } + } + + cleanup, _ := cmd.Flags().GetBool(edsCleanupFlag) + if cleanup { + defer func() { + err = errors.Join(err, os.RemoveAll(path)) + }() + } + + loglevel, _ := cmd.Flags().GetString(badgerLogLevelFlag) + if err = logging.SetLogLevel("badger", loglevel); err != nil { + return err + } + + disableLog, _ := cmd.Flags().GetBool(edsDisableLogFlag) + logFreq, _ := cmd.Flags().GetInt(edsLogStatFreqFlag) + edsWrites, _ := cmd.Flags().GetInt(edsWritesFlag) + edsSize, _ := cmd.Flags().GetInt(edsSizeFlag) + putTimeout, _ := cmd.Flags().GetInt(putTimeoutFlag) + + cfg := edssser.Config{ + EDSSize: edsSize, + EDSWrites: edsWrites, + EnableLog: !disableLog, + LogFilePath: path, + StatLogFreq: logFreq, + OpTimeout: time.Duration(putTimeout) * time.Second, + } + + err = nodebuilder.Init(*nodebuilder.DefaultConfig(node.Full), path, node.Full) + if err != nil { + return err + } + + nodestore, err := nodebuilder.OpenStore(path, nil) + if err != nil { + return err + } + defer func() { + err = errors.Join(err, nodestore.Close()) + }() + + datastore, err := nodestore.Datastore() + if err != nil { + return err + } + + stresser, err := edssser.NewEDSsser(path, datastore, cfg) + if err != nil { + return err + } + + stats, err := stresser.Run(cmd.Context()) + if !errors.Is(err, context.Canceled) { + return err + } + + fmt.Printf("%s", stats.Finalize()) + return nil + }, +} diff --git a/cmd/cel-shed/main.go b/cmd/cel-shed/main.go index 7982cfc1be..872bbb48a9 100644 --- a/cmd/cel-shed/main.go +++ b/cmd/cel-shed/main.go @@ -3,12 +3,14 @@ package main import ( "context" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" ) func init() { - rootCmd.AddCommand(p2pCmd, headerCmd) + rootCmd.AddCommand(p2pCmd, headerCmd, edsStoreCmd) } var rootCmd = &cobra.Command{ @@ -26,5 +28,8 @@ func main() { } func run() error { - return rootCmd.ExecuteContext(context.Background()) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + return rootCmd.ExecuteContext(ctx) } diff --git a/cmd/celestia/blob.go b/cmd/celestia/blob.go new file mode 100644 index 0000000000..8ab130c24d --- /dev/null +++ b/cmd/celestia/blob.go @@ -0,0 +1,227 @@ +package main + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "os" + "reflect" + "strconv" + + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" +) + +var base64Flag bool + +func init() { + blobCmd.AddCommand(getCmd, getAllCmd, submitCmd, getProofCmd) + + 
getCmd.PersistentFlags().BoolVar( + &base64Flag, + "base64", + false, + "printed blob's data a base64 string", + ) + getAllCmd.PersistentFlags().BoolVar( + &base64Flag, + "base64", + false, + "printed blob's data as a base64 string", + ) +} + +var blobCmd = &cobra.Command{ + Use: "blob [command]", + Short: "Allows to interact with the Blob Service via JSON-RPC", + Args: cobra.NoArgs, +} + +var getCmd = &cobra.Command{ + Use: "get [height, namespace, commitment]", + Args: cobra.ExactArgs(3), + Short: "Returns the blob for the given namespace by commitment at a particular height.", + RunE: func(cmd *cobra.Command, args []string) error { + client, err := rpcClient(cmd.Context()) + if err != nil { + return err + } + + height, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a height:%v", err) + } + + namespace, err := parseV0Namespace(args[1]) + if err != nil { + return fmt.Errorf("error parsing a namespace:%v", err) + } + + commitment, err := base64.StdEncoding.DecodeString(args[2]) + if err != nil { + return fmt.Errorf("error parsing a commitment:%v", err) + } + + blob, err := client.Blob.Get(cmd.Context(), height, namespace, commitment) + + printOutput(blob, err) + return nil + }, +} + +var getAllCmd = &cobra.Command{ + Use: "get-all [height, namespace]", + Args: cobra.ExactArgs(2), + Short: "Returns all blobs for the given namespace at a particular height.", + RunE: func(cmd *cobra.Command, args []string) error { + client, err := rpcClient(cmd.Context()) + if err != nil { + return err + } + + height, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a height:%v", err) + } + + namespace, err := parseV0Namespace(args[1]) + if err != nil { + return fmt.Errorf("error parsing a namespace:%v", err) + } + + blobs, err := client.Blob.GetAll(cmd.Context(), height, []share.Namespace{namespace}) + + printOutput(blobs, err) + return nil + }, +} + +var submitCmd = &cobra.Command{ + Use: "submit [namespace, blobData]", + Args: cobra.ExactArgs(2), + Short: "Submit the blob at the given namespace. 
Note: only one blob is allowed to submit through the RPC.", + RunE: func(cmd *cobra.Command, args []string) error { + client, err := rpcClient(cmd.Context()) + if err != nil { + return err + } + + namespace, err := parseV0Namespace(args[0]) + if err != nil { + return fmt.Errorf("error parsing a namespace:%v", err) + } + + parsedBlob, err := blob.NewBlobV0(namespace, []byte(args[1])) + if err != nil { + return fmt.Errorf("error creating a blob:%v", err) + } + + height, err := client.Blob.Submit(cmd.Context(), []*blob.Blob{parsedBlob}) + + response := struct { + Height uint64 `json:"height"` + Commitment blob.Commitment `json:"commitment"` + }{ + Height: height, + Commitment: parsedBlob.Commitment, + } + + printOutput(response, err) + return nil + }, +} + +var getProofCmd = &cobra.Command{ + Use: "get-proof [height, namespace, commitment]", + Args: cobra.ExactArgs(3), + Short: "Retrieves the blob in the given namespaces at the given height by commitment and returns its Proof.", + RunE: func(cmd *cobra.Command, args []string) error { + client, err := rpcClient(cmd.Context()) + if err != nil { + return err + } + + height, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a height:%v", err) + } + + namespace, err := parseV0Namespace(args[1]) + if err != nil { + return fmt.Errorf("error parsing a namespace:%v", err) + } + + commitment, err := base64.StdEncoding.DecodeString(args[2]) + if err != nil { + return fmt.Errorf("error parsing a commitment:%v", err) + } + + proof, err := client.Blob.GetProof(cmd.Context(), height, namespace, commitment) + + printOutput(proof, err) + return nil + }, +} + +func printOutput(data interface{}, err error) { + if err != nil { + data = err + } + + if !base64Flag && err == nil { + data = formatData(data) + } + + resp := struct { + Result interface{} `json:"result"` + }{ + Result: data, + } + + bytes, err := json.MarshalIndent(resp, "", " ") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintln(os.Stdout, string(bytes)) +} + +func formatData(data interface{}) interface{} { + type tempBlob struct { + Namespace []byte `json:"namespace"` + Data string `json:"data"` + ShareVersion uint32 `json:"share_version"` + Commitment []byte `json:"commitment"` + } + + if reflect.TypeOf(data).Kind() == reflect.Slice { + blobs, ok := data.([]*blob.Blob) + if !ok { + return data + } + + result := make([]tempBlob, len(blobs)) + for i, b := range blobs { + result[i] = tempBlob{ + Namespace: b.Namespace(), + Data: string(b.Data), + ShareVersion: b.ShareVersion, + Commitment: b.Commitment, + } + } + return result + } + + b, ok := data.(*blob.Blob) + if !ok { + return data + } + return tempBlob{ + Namespace: b.Namespace(), + Data: string(b.Data), + ShareVersion: b.ShareVersion, + Commitment: b.Commitment, + } +} diff --git a/cmd/celestia/logs.go b/cmd/celestia/logs.go new file mode 100644 index 0000000000..ac302ff6dd --- /dev/null +++ b/cmd/celestia/logs.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/cmd" +) + +var logCmd = &cobra.Command{ + Use: cmd.LogLevelFlag, + Args: cobra.ExactArgs(1), + Short: "Allows to set log level for all modules to " + + "`DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL and their lower-case forms`", + + RunE: func(c *cobra.Command, args []string) error { + client, err := rpcClient(c.Context()) + if err != nil { + return err + } + return client.Node.LogLevelSet(c.Context(), "*", args[0]) + }, +} + 
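Both `logCmd` here and the blob subcommands above call `rpcClient(c.Context())` rather than dialing the node themselves: further down in this diff, `rpcCmd` gains a `PersistentPreRunE` that creates a single `*client.Client`, stores it in the command context under an unexported key, and closes it again in `PersistentPostRunE`. Below is a stand-alone sketch of that context-key pattern, using a placeholder `Client` type instead of the real `api/rpc/client.Client`:

```go
// Stand-alone sketch of the context-key pattern used by rpcCmd's
// PersistentPreRunE and the rpcClient helper later in this diff.
package main

import (
	"context"
	"errors"
	"fmt"
)

// Client is a placeholder for the JSON-RPC client stored in the context.
type Client struct{ Addr string }

// rpcClientKey is an unexported struct key, which prevents collisions
// with context values set by other packages.
type rpcClientKey struct{}

func withRPCClient(ctx context.Context, c *Client) context.Context {
	return context.WithValue(ctx, rpcClientKey{}, c)
}

func rpcClientFrom(ctx context.Context) (*Client, error) {
	c, ok := ctx.Value(rpcClientKey{}).(*Client)
	if !ok {
		return nil, errors.New("rpc client was not set")
	}
	return c, nil
}

func main() {
	ctx := withRPCClient(context.Background(), &Client{Addr: "http://localhost:26658"})
	c, err := rpcClientFrom(ctx)
	fmt.Println(c.Addr, err) // http://localhost:26658 <nil>
}
```

Sharing one client this way means every subcommand under `rpc` (including the new `blob` and log-level commands) reuses the same connection and token handling instead of re-reading flags and reconnecting per command.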
+var logModuleCmd = &cobra.Command{ + Use: cmd.LogLevelModuleFlag, + Args: cobra.MinimumNArgs(1), + Short: "Allows to set log level for a particular module in format :", + RunE: func(c *cobra.Command, args []string) error { + client, err := rpcClient(c.Context()) + if err != nil { + return err + } + for _, ll := range args { + params := strings.Split(ll, ":") + if len(params) != 2 { + return fmt.Errorf("cmd: %s arg must be in form :,"+ + "e.g. pubsub:debug", cmd.LogLevelModuleFlag) + } + if err = client.Node.LogLevelSet(c.Context(), params[0], params[1]); err != nil { + return err + } + } + return nil + }, +} diff --git a/cmd/celestia/rpc.go b/cmd/celestia/rpc.go index 767fca872c..c263496b26 100644 --- a/cmd/celestia/rpc.go +++ b/cmd/celestia/rpc.go @@ -2,9 +2,11 @@ package main import ( "bytes" + "context" "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "log" @@ -16,17 +18,12 @@ import ( "github.com/spf13/cobra" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/api/rpc/client" - "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/state" ) -const ( - authEnvKey = "CELESTIA_NODE_AUTH_TOKEN" -) +const authEnvKey = "CELESTIA_NODE_AUTH_TOKEN" //nolint:gosec var requestURL string var authTokenFlag string @@ -63,6 +60,8 @@ func init() { false, "Print JSON-RPC request along with the response", ) + rpcCmd.AddCommand(logCmd, logModuleCmd) + rpcCmd.AddCommand(blobCmd) rootCmd.AddCommand(rpcCmd) } @@ -70,6 +69,25 @@ var rpcCmd = &cobra.Command{ Use: "rpc [namespace] [method] [params...]", Short: "Send JSON-RPC request", Args: cobra.MinimumNArgs(2), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + rpcClient, err := newRPCClient(cmd.Context()) + if err != nil { + return err + } + + ctx := context.WithValue(cmd.Context(), rpcClientKey{}, rpcClient) + cmd.SetContext(ctx) + return nil + }, + PersistentPostRunE: func(cmd *cobra.Command, args []string) error { + client, err := rpcClient(cmd.Context()) + if err != nil { + return err + } + + client.Close() + return nil + }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { modules := client.Modules if len(args) == 0 { @@ -102,133 +120,35 @@ var rpcCmd = &cobra.Command{ func parseParams(method string, params []string) []interface{} { parsedParams := make([]interface{}, len(params)) - + validateParamsFn := func(has, want int) error { + if has != want { + return fmt.Errorf("rpc: invalid amount of params. has=%d, want=%d", has, want) + } + return nil + } switch method { case "GetSharesByNamespace": + if err := validateParamsFn(len(params), 2); err != nil { + panic(err) + } // 1. Share Root root, err := parseJSON(params[0]) if err != nil { panic(fmt.Errorf("couldn't parse share root as json: %v", err)) } parsedParams[0] = root - // 2. NamespaceID - nID, err := parseNamespace(params[1]) - if err != nil { - panic(fmt.Sprintf("Error parsing namespace: %v", err)) - } - parsedParams[1] = nID - case "Submit": - // 1. NamespaceID - var err error - nID, err := parseNamespace(params[0]) - if err != nil { - panic(fmt.Sprintf("Error parsing namespace: %v", err)) - } - // 2. 
Blob data - var blobData []byte - switch { - case strings.HasPrefix(params[1], "0x"): - decoded, err := hex.DecodeString(params[1][2:]) - if err != nil { - panic("Error decoding blob: hex string could not be decoded.") - } - blobData = decoded - case strings.HasPrefix(params[1], "\""): - // user input an utf string that needs to be encoded to base64 - src := []byte(params[1]) - blobData = make([]byte, base64.StdEncoding.EncodedLen(len(src))) - base64.StdEncoding.Encode(blobData, []byte(params[1])) - default: - // otherwise, we assume the user has already encoded their input to base64 - blobData, err = base64.StdEncoding.DecodeString(params[1]) - if err != nil { - panic("Error decoding blob data: base64 string could not be decoded.") - } - } - parsedBlob, err := blob.NewBlob(0, nID, blobData) - if err != nil { - panic(fmt.Sprintf("Error creating blob: %v", err)) - } - parsedParams[0] = []*blob.Blob{parsedBlob} - // param count doesn't match input length, so cut off nil values - return parsedParams[:1] - case "SubmitPayForBlob": - // 1. Fee (state.Int is a string) - parsedParams[0] = params[0] - // 2. GasLimit (uint64) - num, err := strconv.ParseUint(params[1], 10, 64) - if err != nil { - panic("Error parsing gas limit: uint64 could not be parsed.") - } - parsedParams[1] = num - // 3. NamespaceID - nID, err := parseNamespace(params[2]) - if err != nil { - panic(fmt.Sprintf("Error parsing namespace: %v", err)) - } - // 4. Blob data - var blobData []byte - switch { - case strings.HasPrefix(params[3], "0x"): - decoded, err := hex.DecodeString(params[3][2:]) - if err != nil { - panic("Error decoding blob: hex string could not be decoded.") - } - blobData = decoded - case strings.HasPrefix(params[3], "\""): - // user input an utf string that needs to be encoded to base64 - src := []byte(params[1]) - blobData = make([]byte, base64.StdEncoding.EncodedLen(len(src))) - base64.StdEncoding.Encode(blobData, []byte(params[3])) - default: - // otherwise, we assume the user has already encoded their input to base64 - blobData, err = base64.StdEncoding.DecodeString(params[3]) - if err != nil { - panic("Error decoding blob: base64 string could not be decoded.") - } - } - parsedBlob, err := blob.NewBlob(0, nID, blobData) - if err != nil { - panic(fmt.Sprintf("Error creating blob: %v", err)) - } - parsedParams[2] = []*blob.Blob{parsedBlob} - return parsedParams[:3] - case "Get": - // 1. Height - num, err := strconv.ParseUint(params[0], 10, 64) - if err != nil { - panic("Error parsing gas limit: uint64 could not be parsed.") - } - parsedParams[0] = num - // 2. NamespaceID - nID, err := parseNamespace(params[1]) - if err != nil { - panic(fmt.Sprintf("Error parsing namespace: %v", err)) - } - parsedParams[1] = nID - // 3: Commitment - commitment, err := base64.StdEncoding.DecodeString(params[2]) - if err != nil { - panic("Error decoding commitment: base64 string could not be decoded.") - } - parsedParams[2] = commitment - return parsedParams - case "GetAll": // NOTE: Over the cli, you can only pass one namespace - // 1. Height - num, err := strconv.ParseUint(params[0], 10, 64) - if err != nil { - panic("Error parsing gas limit: uint64 could not be parsed.") - } - parsedParams[0] = num - // 2. NamespaceID - nID, err := parseNamespace(params[1]) + // 2. 
Namespace + namespace, err := parseV0Namespace(params[1]) if err != nil { panic(fmt.Sprintf("Error parsing namespace: %v", err)) } - parsedParams[1] = []namespace.ID{nID} + parsedParams[1] = namespace return parsedParams case "QueryDelegation", "QueryUnbonding", "BalanceForAddress": var err error + if err = validateParamsFn(len(params), 2); err != nil { + panic(err) + } parsedParams[0], err = parseAddressFromString(params[0]) if err != nil { panic(fmt.Errorf("error parsing address: %w", err)) @@ -248,6 +168,9 @@ func parseParams(method string, params []string) []interface{} { case "Transfer", "Delegate", "Undelegate": // 1. Address var err error + if err = validateParamsFn(len(params), 4); err != nil { + panic(err) + } parsedParams[0], err = parseAddressFromString(params[0]) if err != nil { panic(fmt.Errorf("error parsing address: %w", err)) @@ -265,6 +188,9 @@ func parseParams(method string, params []string) []interface{} { case "CancelUnbondingDelegation": // 1. Validator Address var err error + if err = validateParamsFn(len(params), 5); err != nil { + panic(err) + } parsedParams[0], err = parseAddressFromString(params[0]) if err != nil { panic(fmt.Errorf("error parsing address: %w", err)) @@ -279,9 +205,13 @@ func parseParams(method string, params []string) []interface{} { panic("Error parsing gas limit: uint64 could not be parsed.") } parsedParams[4] = num + return parsedParams case "BeginRedelegate": // 1. Source Validator Address var err error + if err = validateParamsFn(len(params), 5); err != nil { + panic(err) + } parsedParams[0], err = parseAddressFromString(params[0]) if err != nil { panic(fmt.Errorf("error parsing address: %w", err)) @@ -300,6 +230,7 @@ func parseParams(method string, params []string) []interface{} { panic("Error parsing gas limit: uint64 could not be parsed.") } parsedParams[4] = num + return parsedParams default: } @@ -321,7 +252,6 @@ func parseParams(method string, params []string) []interface{} { parsedParams[i] = param } } - return parsedParams } @@ -368,7 +298,7 @@ func sendJSONRPCRequest(namespace, method string, params []interface{}) { rawResponseJSON, err := parseJSON(string(responseBody)) if err != nil { - panic(err) + log.Fatalf("Error parsing JSON-RPC response: %v", err) } if printRequest { output, err := json.MarshalIndent(outputWithRequest{ @@ -418,30 +348,34 @@ func parseSignatureForHelpstring(methodSig reflect.StructField) string { return simplifiedSignature } -func parseNamespace(param string) (namespace.ID, error) { - var nID []byte - var err error +// parseV0Namespace parses a namespace from a base64 or hex string. The param +// is expected to be the user-specified portion of a v0 namespace ID (i.e. the +// last 10 bytes). +func parseV0Namespace(param string) (share.Namespace, error) { + userBytes, err := decodeToBytes(param) + if err != nil { + return nil, err + } + + // if the namespace ID is <= 10 bytes, left pad it with 0s + return share.NewBlobNamespaceV0(userBytes) +} + +// decodeToBytes decodes a Base64 or hex input string into a byte slice. 
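For context on what these helpers do with user input, a minimal, self-contained sketch of the v0 namespace padding rule follows, matching the test vectors in rpc_test.go below. The helper name toV0Namespace and its inline constants are hypothetical stand-ins; in this change the real conversion is performed by decodeToBytes plus share.NewBlobNamespaceV0, which also applies validation that the sketch omits.

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"strings"
)

// toV0Namespace is a hypothetical stand-in shown only to illustrate the padding
// rule: decode the user input (hex or base64), then right-align it inside a
// zeroed 29-byte namespace (1 version byte followed by a 28-byte ID).
func toV0Namespace(param string) ([]byte, error) {
	var user []byte
	var err error
	if strings.HasPrefix(param, "0x") {
		user, err = hex.DecodeString(param[2:])
	} else {
		user, err = base64.StdEncoding.DecodeString(param)
	}
	if err != nil {
		return nil, err
	}
	if len(user) > 10 {
		return nil, fmt.Errorf("v0 namespace ID must be at most 10 bytes, got %d", len(user))
	}
	ns := make([]byte, 29)        // version byte 0x00 + 28-byte ID, all zeroed
	copy(ns[29-len(user):], user) // left-pad the user bytes with zeros
	return ns, nil
}

func main() {
	ns, err := toV0Namespace("0x42690c204d39600fddd3")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", ns) // 19 zero bytes followed by 42690c204d39600fddd3 (29 bytes total)
}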
+func decodeToBytes(param string) ([]byte, error) { if strings.HasPrefix(param, "0x") { decoded, err := hex.DecodeString(param[2:]) if err != nil { return nil, fmt.Errorf("error decoding namespace ID: %w", err) } - nID = decoded - } else { - // otherwise, it's just a base64 string - nID, err = base64.StdEncoding.DecodeString(param) - if err != nil { - return nil, fmt.Errorf("error decoding namespace ID: %w", err) - } + return decoded, nil } - // if the namespace ID is 8 bytes, add v0 share + namespace prefix and zero pad - if len(nID) == 8 { - nID, err = share.NewNamespaceV0(nID) - if err != nil { - return nil, err - } + // otherwise, it's just a base64 string + decoded, err := base64.StdEncoding.DecodeString(param) + if err != nil { + return nil, fmt.Errorf("error decoding namespace ID: %w", err) } - return nID, nil + return decoded, nil } func parseJSON(param string) (json.RawMessage, error) { @@ -449,3 +383,20 @@ func parseJSON(param string) (json.RawMessage, error) { err := json.Unmarshal([]byte(param), &raw) return raw, err } + +func newRPCClient(ctx context.Context) (*client.Client, error) { + if authTokenFlag == "" { + authTokenFlag = os.Getenv(authEnvKey) + } + return client.NewClient(ctx, requestURL, authTokenFlag) +} + +type rpcClientKey struct{} + +func rpcClient(ctx context.Context) (*client.Client, error) { + client, ok := ctx.Value(rpcClientKey{}).(*client.Client) + if !ok { + return nil, errors.New("rpc client was not set") + } + return client, nil +} diff --git a/cmd/celestia/rpc_test.go b/cmd/celestia/rpc_test.go new file mode 100644 index 0000000000..53087646a7 --- /dev/null +++ b/cmd/celestia/rpc_test.go @@ -0,0 +1,81 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/celestiaorg/celestia-node/share" +) + +func Test_parseNamespaceID(t *testing.T) { + type testCase struct { + name string + param string + want share.Namespace + wantErr bool + } + testCases := []testCase{ + { + param: "0x0c204d39600fddd3", + name: "8 byte hex encoded namespace ID gets left padded", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: false, + }, + { + name: "10 byte hex encoded namespace ID", + param: "0x42690c204d39600fddd3", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x42, 0x69, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: false, + }, + { + name: "29 byte hex encoded namespace ID", + param: "0x0000000000000000000000000000000000000001010101010101010101", + want: share.Namespace{ + 0x0, // namespace version + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // v0 ID prefix + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, // namespace ID + }, + wantErr: true, + }, + { + name: "11 byte hex encoded namespace ID returns error", + param: "0x42690c204d39600fddd3a3", + want: share.Namespace{}, + wantErr: true, + }, + { + name: "10 byte base64 encoded namespace ID", + param: "QmkMIE05YA/d0w==", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x42, 0x69, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: false, + }, + { + name: "not base64 or hex encoded namespace ID returns error", + param: "5748493939429", + want: share.Namespace{}, + wantErr: true, + }, + } + + for _, tc := 
range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseV0Namespace(tc.param) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/cmd/celestia/util.go b/cmd/celestia/util.go index 85505c3a60..a38860d1f7 100644 --- a/cmd/celestia/util.go +++ b/cmd/celestia/util.go @@ -26,12 +26,6 @@ func persistentPreRunEnv(cmd *cobra.Command, nodeType node.Type, _ []string) err return err } ctx = cmdnode.WithNetwork(ctx, parsedNetwork) - ctx = cmdnode.WithNodeBuildInfo(ctx, &node.BuildInfo{ - LastCommit: lastCommit, - SemanticVersion: semanticVersion, - SystemVersion: systemVersion, - GolangVersion: golangVersion, - }) // loads existing config into the environment ctx, err = cmdnode.ParseNodeFlags(ctx, cmd, cmdnode.Network(ctx)) diff --git a/cmd/celestia/version.go b/cmd/celestia/version.go index 462f17b474..f0d379e7a7 100644 --- a/cmd/celestia/version.go +++ b/cmd/celestia/version.go @@ -2,18 +2,10 @@ package main import ( "fmt" - "runtime" "github.com/spf13/cobra" -) - -var ( - buildTime string - lastCommit string - semanticVersion string - systemVersion = fmt.Sprintf("%s/%s", runtime.GOARCH, runtime.GOOS) - golangVersion = runtime.Version() + "github.com/celestiaorg/celestia-node/nodebuilder/node" ) var versionCmd = &cobra.Command{ @@ -24,9 +16,10 @@ var versionCmd = &cobra.Command{ } func printBuildInfo(_ *cobra.Command, _ []string) { - fmt.Printf("Semantic version: %s\n", semanticVersion) - fmt.Printf("Commit: %s\n", lastCommit) - fmt.Printf("Build Date: %s\n", buildTime) - fmt.Printf("System version: %s\n", systemVersion) - fmt.Printf("Golang version: %s\n", golangVersion) + buildInfo := node.GetBuildInfo() + fmt.Printf("Semantic version: %s\n", buildInfo.SemanticVersion) + fmt.Printf("Commit: %s\n", buildInfo.LastCommit) + fmt.Printf("Build Date: %s\n", buildInfo.BuildTime) + fmt.Printf("System version: %s\n", buildInfo.SystemVersion) + fmt.Printf("Golang version: %s\n", buildInfo.GolangVersion) } diff --git a/cmd/env.go b/cmd/env.go index ca915d884f..f9860a2de8 100644 --- a/cmd/env.go +++ b/cmd/env.go @@ -38,11 +38,6 @@ func NodeConfig(ctx context.Context) nodebuilder.Config { return cfg } -// NodeInfo reads the node build inforamtion from the context. -func NodeInfo(ctx context.Context) node.BuildInfo { - return ctx.Value(buildInfo{}).(node.BuildInfo) -} - // WithNodeType sets the node type in the given context. func WithNodeType(ctx context.Context, tp node.Type) context.Context { return context.WithValue(ctx, nodeTypeKey{}, tp) @@ -78,16 +73,10 @@ func WithNodeConfig(ctx context.Context, config *nodebuilder.Config) context.Con return context.WithValue(ctx, configKey{}, *config) } -// WithNodeConfig sets the node config build information. 
-func WithNodeBuildInfo(ctx context.Context, info *node.BuildInfo) context.Context { - return context.WithValue(ctx, buildInfo{}, *info) -} - type ( optionsKey struct{} configKey struct{} storePathKey struct{} nodeTypeKey struct{} networkKey struct{} - buildInfo struct{} ) diff --git a/cmd/flags_misc.go b/cmd/flags_misc.go index 4483e17201..cd539bde4c 100644 --- a/cmd/flags_misc.go +++ b/cmd/flags_misc.go @@ -11,13 +11,8 @@ import ( otelpyroscope "github.com/pyroscope-io/otel-profiling-go" "github.com/spf13/cobra" flag "github.com/spf13/pflag" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/sdk/resource" - tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.11.0" - "go.opentelemetry.io/otel/trace" "github.com/celestiaorg/celestia-node/logs" "github.com/celestiaorg/celestia-node/nodebuilder" @@ -25,8 +20,8 @@ import ( ) var ( - logLevelFlag = "log.level" - logLevelModuleFlag = "log.level.module" + LogLevelFlag = "log.level" + LogLevelModuleFlag = "log.level.module" pprofFlag = "pprof" tracingFlag = "tracing" tracingEndpointFlag = "tracing.endpoint" @@ -45,14 +40,14 @@ func MiscFlags() *flag.FlagSet { flags := &flag.FlagSet{} flags.String( - logLevelFlag, + LogLevelFlag, "INFO", `DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL and their lower-case forms`, ) flags.StringSlice( - logLevelModuleFlag, + LogLevelModuleFlag, nil, "<module>:<level>, e.g. pubsub:debug", ) @@ -128,24 +123,24 @@ and their lower-case forms`, // ParseMiscFlags parses miscellaneous flags from the given cmd and applies values to Env. func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, error) { - logLevel := cmd.Flag(logLevelFlag).Value.String() + logLevel := cmd.Flag(LogLevelFlag).Value.String() if logLevel != "" { level, err := logging.LevelFromString(logLevel) if err != nil { - return ctx, fmt.Errorf("cmd: while parsing '%s': %w", logLevelFlag, err) + return ctx, fmt.Errorf("cmd: while parsing '%s': %w", LogLevelFlag, err) } logs.SetAllLoggers(level) } - logModules, err := cmd.Flags().GetStringSlice(logLevelModuleFlag) + logModules, err := cmd.Flags().GetStringSlice(LogLevelModuleFlag) if err != nil { panic(err) } for _, ll := range logModules { params := strings.Split(ll, ":") if len(params) != 2 { - return ctx, fmt.Errorf("cmd: %s arg must be in form <module>:<level>, e.g. pubsub:debug", logLevelModuleFlag) + return ctx, fmt.Errorf("cmd: %s arg must be in form <module>:<level>, e.g. pubsub:debug", LogLevelModuleFlag) } err := logging.SetLogLevel(params[0], params[1]) @@ -199,7 +194,6 @@ func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, e } if ok { - var tp trace.TracerProvider opts := []otlptracehttp.Option{ otlptracehttp.WithCompression(otlptracehttp.GzipCompression), otlptracehttp.WithEndpoint(cmd.Flag(tracingEndpointFlag).Value.String()), @@ -210,30 +204,13 @@ func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, e opts = append(opts, otlptracehttp.WithInsecure()) } - exp, err := otlptracehttp.New(cmd.Context(), opts...) - if err != nil { - return ctx, err - } - - tp = tracesdk.NewTracerProvider( - tracesdk.WithSampler(tracesdk.AlwaysSample()), - // Always be sure to batch in production. - tracesdk.WithBatcher(exp), - // Record information about this application in a Resource.
- tracesdk.WithResource(resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(fmt.Sprintf("Celestia-%s", NodeType(ctx).String())), - // TODO(@Wondertan): Versioning: semconv.ServiceVersionKey - )), - ) - + pyroOpts := make([]otelpyroscope.Option, 0) ok, err = cmd.Flags().GetBool(pyroscopeTracing) if err != nil { panic(err) } if ok { - tp = otelpyroscope.NewTracerProvider( - tp, + pyroOpts = append(pyroOpts, otelpyroscope.WithAppName("celestia.da-node"), otelpyroscope.WithPyroscopeURL(cmd.Flag(pyroscopeEndpoint).Value.String()), otelpyroscope.WithRootSpanOnly(true), @@ -242,8 +219,7 @@ func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, e otelpyroscope.WithProfileBaselineURL(true), ) } - - otel.SetTracerProvider(tp) + ctx = WithNodeOptions(ctx, nodebuilder.WithTraces(opts, pyroOpts)) } ok, err = cmd.Flags().GetBool(metricsFlag) @@ -262,7 +238,7 @@ func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, e opts = append(opts, otlpmetrichttp.WithInsecure()) } - ctx = WithNodeOptions(ctx, nodebuilder.WithMetrics(opts, NodeType(ctx), NodeInfo(ctx))) + ctx = WithNodeOptions(ctx, nodebuilder.WithMetrics(opts, NodeType(ctx))) } ok, err = cmd.Flags().GetBool(p2pMetrics) diff --git a/core/client_test.go b/core/client_test.go index 7467b3c3a3..8ad9060555 100644 --- a/core/client_test.go +++ b/core/client_test.go @@ -10,7 +10,7 @@ import ( ) func TestRemoteClient_Status(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) client := StartTestNode(t).Client @@ -20,7 +20,7 @@ func TestRemoteClient_Status(t *testing.T) { } func TestRemoteClient_StartBlockSubscription_And_GetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) t.Cleanup(cancel) client := StartTestNode(t).Client diff --git a/core/eds.go b/core/eds.go index dc9b2b4a9e..eb93c249ba 100644 --- a/core/eds.go +++ b/core/eds.go @@ -3,13 +3,17 @@ package core import ( "context" "errors" + "fmt" "github.com/filecoin-project/dagstore" "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/app" "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-app/pkg/square" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -19,22 +23,31 @@ import ( // extendBlock extends the given block data, returning the resulting // ExtendedDataSquare (EDS). If there are no transactions in the block, // nil is returned in place of the eds. 
-func extendBlock(data types.Data) (*rsmt2d.ExtendedDataSquare, error) { - if len(data.Txs) == 0 && data.SquareSize == uint64(1) { +func extendBlock(data types.Data, appVersion uint64, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare, error) { + if app.IsEmptyBlock(data, appVersion) { return nil, nil } - sqr, err := square.Construct(data.Txs.ToSliceOfBytes(), appconsts.LatestVersion, share.MaxSquareSize) + // Construct the data square from the block's transactions + dataSquare, err := square.Construct(data.Txs.ToSliceOfBytes(), appVersion, appconsts.SquareSizeUpperBound(appVersion)) if err != nil { return nil, err } + return extendShares(shares.ToBytes(dataSquare), options...) +} - shares := make([][]byte, len(sqr)) - for i, s := range sqr { - shares[i] = s.ToBytes() +func extendShares(s [][]byte, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare, error) { + // Check that the length of the square is a power of 2. + if !shares.IsPowerOfTwo(len(s)) { + return nil, fmt.Errorf("number of shares is not a power of 2: got %d", len(s)) } - - return da.ExtendShares(shares) + // here we construct a tree + // Note: uses the nmt wrapper to construct the tree. + squareSize := square.Size(len(s)) + return rsmt2d.ComputeExtendedDataSquare(s, + appconsts.DefaultCodec(), + wrapper.NewConstructor(uint64(squareSize), + options...)) } // storeEDS will only store extended block if it is not empty and doesn't already exist. diff --git a/core/eds_test.go b/core/eds_test.go index 6bc04c96c4..6a2026ee58 100644 --- a/core/eds_test.go +++ b/core/eds_test.go @@ -7,6 +7,8 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-node/share" @@ -21,7 +23,7 @@ func TestTrulyEmptySquare(t *testing.T) { SquareSize: 1, } - eds, err := extendBlock(data) + eds, err := extendBlock(data, appconsts.LatestVersion) require.NoError(t, err) assert.Nil(t, eds) } @@ -32,14 +34,20 @@ func TestTrulyEmptySquare(t *testing.T) { // construction of the square is deterministic, and the rules which dictate the // square size do not allow for empty block data. However, should that ever // occur, we need to ensure that the correct data root is generated. 
-func TestNonEmptySquareWithZeroTxs(t *testing.T) { +func TestEmptySquareWithZeroTxs(t *testing.T) { data := types.Data{ - Txs: []types.Tx{}, - SquareSize: 16, + Txs: []types.Tx{}, } - eds, err := extendBlock(data) + eds, err := extendBlock(data, appconsts.LatestVersion) + require.Nil(t, eds) + require.NoError(t, err) + + // force extend the square using an empty block and compare with the min DAH + eds, err = app.ExtendBlock(data, appconsts.LatestVersion) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(eds) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(eds) assert.Equal(t, share.EmptyRoot().Hash(), dah.Hash()) } diff --git a/core/exchange.go b/core/exchange.go index 70ca47c56f..f8e1606a3e 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -9,9 +9,11 @@ import ( "golang.org/x/sync/errgroup" libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" ) const concurrencyLimit = 4 @@ -76,7 +78,7 @@ func (ce *Exchange) GetVerifiedRange( from *header.ExtendedHeader, amount uint64, ) ([]*header.ExtendedHeader, error) { - headers, err := ce.GetRangeByHeight(ctx, uint64(from.Height())+1, amount) + headers, err := ce.GetRangeByHeight(ctx, from.Height()+1, amount) if err != nil { return nil, err } @@ -105,20 +107,25 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende } // extend block data - eds, err := extendBlock(block.Data) + adder := ipld.NewProofsAdder(int(block.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(block.Data, block.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) } // construct extended header - eh, err := ce.construct(ctx, &block.Header, comm, vals, eds) + eh, err := ce.construct(&block.Header, comm, vals, eds) if err != nil { - return nil, fmt.Errorf("constructing extended header for height %d: %w", &block.Height, err) + panic(fmt.Errorf("constructing extended header for height %d: %w", &block.Height, err)) } // verify hashes match if !bytes.Equal(hash, eh.Hash()) { return nil, fmt.Errorf("incorrect hash in header at height %d: expected %x, got %x", &block.Height, hash, eh.Hash()) } + + ctx = ipld.CtxWithProofsAdder(ctx, adder) err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for height %d: %w", &block.Height, err) @@ -126,7 +133,10 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende return eh, nil } -func (ce *Exchange) Head(ctx context.Context) (*header.ExtendedHeader, error) { +func (ce *Exchange) Head( + ctx context.Context, + _ ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { log.Debug("requesting head") return ce.getExtendedHeaderByHeight(ctx, nil) } @@ -142,15 +152,20 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 log.Debugw("fetched signed block from core", "height", b.Header.Height) // extend block data - eds, err := extendBlock(b.Data) + adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", b.Header.Height, err) } // create extended header - eh, err := 
ce.construct(ctx, &b.Header, &b.Commit, &b.ValidatorSet, eds) + eh, err := ce.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) if err != nil { - return nil, fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err) + panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) } + + ctx = ipld.CtxWithProofsAdder(ctx, adder) err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for block height %d: %w", b.Header.Height, err) diff --git a/core/exchange_test.go b/core/exchange_test.go index f6302b5742..579c69fba6 100644 --- a/core/exchange_test.go +++ b/core/exchange_test.go @@ -31,7 +31,7 @@ func TestCoreExchange_RequestHeaders(t *testing.T) { assert.Equal(t, 10, len(headers)) } -func createCoreFetcher(t *testing.T, cfg *TestConfig) (*BlockFetcher, testnode.Context) { +func createCoreFetcher(t *testing.T, cfg *testnode.Config) (*BlockFetcher, testnode.Context) { cctx := StartTestNodeWithConfig(t, cfg) // wait for height 2 in order to be able to start submitting txs (this prevents // flakiness with accessing account state) diff --git a/core/fetcher_test.go b/core/fetcher_test.go index de7d1ba05e..3380dbb402 100644 --- a/core/fetcher_test.go +++ b/core/fetcher_test.go @@ -11,7 +11,7 @@ import ( ) func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) t.Cleanup(cancel) client := StartTestNode(t).Client @@ -42,7 +42,7 @@ func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { // TestBlockFetcherHeaderValues tests that both the Commit and ValidatorSet // endpoints are working as intended. 
func TestBlockFetcherHeaderValues(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) t.Cleanup(cancel) client := StartTestNode(t).Client diff --git a/core/header_test.go b/core/header_test.go index 57f53d3661..c942ea7875 100644 --- a/core/header_test.go +++ b/core/header_test.go @@ -30,10 +30,10 @@ func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { comm, val, err := fetcher.GetBlockInfo(ctx, &height) require.NoError(t, err) - eds, err := extendBlock(b.Data) + eds, err := extendBlock(b.Data, b.Header.Version.App) require.NoError(t, err) - headerExt, err := header.MakeExtendedHeader(ctx, &b.Header, comm, val, eds) + headerExt, err := header.MakeExtendedHeader(&b.Header, comm, val, eds) require.NoError(t, err) assert.Equal(t, header.EmptyDAH(), *headerExt.DAH) @@ -41,11 +41,9 @@ func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { func TestMismatchedDataHash_ComputedRoot(t *testing.T) { header := headertest.RandExtendedHeader(t) - header.DataHash = rand.Bytes(32) - panicFn := func() { - header.Validate() //nolint:errcheck - } - assert.Panics(t, panicFn) + err := header.Validate() + assert.Contains(t, err.Error(), "mismatch between data hash commitment from"+ + " core header and computed data root") } diff --git a/core/listener.go b/core/listener.go index 24d83cda12..1c79fbbe71 100644 --- a/core/listener.go +++ b/core/listener.go @@ -12,9 +12,11 @@ import ( "go.opentelemetry.io/otel/attribute" libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) @@ -150,17 +152,21 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS attribute.Int64("height", b.Header.Height), ) // extend block data - eds, err := extendBlock(b.Data) + adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) if err != nil { return fmt.Errorf("extending block data: %w", err) } // generate extended header - eh, err := cl.construct(ctx, &b.Header, &b.Commit, &b.ValidatorSet, eds) + eh, err := cl.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) if err != nil { - return fmt.Errorf("making extended header: %w", err) + panic(fmt.Errorf("making extended header: %w", err)) } // attempt to store block data if not empty + ctx = ipld.CtxWithProofsAdder(ctx, adder) err = storeEDS(ctx, b.Header.DataHash.Bytes(), eds, cl.store) if err != nil { return fmt.Errorf("storing EDS: %w", err) @@ -175,7 +181,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS if !syncing { err = cl.hashBroadcaster(ctx, shrexsub.Notification{ DataHash: eh.DataHash.Bytes(), - Height: uint64(eh.Height()), + Height: eh.Height(), }) if err != nil && !errors.Is(err, context.Canceled) { log.Errorw("listener: broadcasting data hash", diff --git a/core/listener_test.go b/core/listener_test.go index 7d4b12310a..8b3d05bea9 100644 --- a/core/listener_test.go +++ b/core/listener_test.go @@ -31,8 +31,8 @@ func TestListener(t *testing.T) { // create mocknet with two pubsub endpoints ps0, ps1 := createMocknetWithTwoPubsubEndpoints(ctx, t) subscriber := p2p.NewSubscriber[*header.ExtendedHeader](ps1, header.MsgID, networkID) - err := 
subscriber.AddValidator(func(context.Context, *header.ExtendedHeader) pubsub.ValidationResult { - return pubsub.ValidationAccept + err := subscriber.SetVerifier(func(context.Context, *header.ExtendedHeader) error { + return nil }) require.NoError(t, err) require.NoError(t, subscriber.Start(ctx)) diff --git a/core/testing.go b/core/testing.go index 393ec62c09..8d29ce9bbc 100644 --- a/core/testing.go +++ b/core/testing.go @@ -1,30 +1,18 @@ package core import ( - "fmt" "net" "net/url" "testing" + "time" - appconfig "github.com/cosmos/cosmos-sdk/server/config" "github.com/stretchr/testify/require" tmconfig "github.com/tendermint/tendermint/config" tmrand "github.com/tendermint/tendermint/libs/rand" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/celestiaorg/celestia-app/test/util/testnode" ) -// TestConfig encompasses all the configs required to run test Tendermint + Celestia App tandem. -type TestConfig struct { - ConsensusParams *tmproto.ConsensusParams - Tendermint *tmconfig.Config - App *appconfig.Config - - Accounts []string - SuppressLogs bool -} - // DefaultTestConfig returns the default testing configuration for Tendermint + Celestia App tandem. // // It fetches free ports from OS and sets them into configs, s.t. @@ -32,17 +20,8 @@ type TestConfig struct { // multiple tests nodes in parallel. // // Additionally, it instructs Tendermint + Celestia App tandem to setup 10 funded accounts. -func DefaultTestConfig() *TestConfig { - conCfg := testnode.DefaultParams() - - tnCfg := testnode.DefaultTendermintConfig() - tnCfg.RPC.ListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort()) - tnCfg.RPC.GRPCListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort()) - tnCfg.P2P.ListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort()) - - appCfg := testnode.DefaultAppConfig() - appCfg.GRPC.Address = fmt.Sprintf("127.0.0.1:%d", getFreePort()) - appCfg.API.Address = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort()) +func DefaultTestConfig() *testnode.Config { + cfg := testnode.DefaultConfig() // instructs creating funded accounts // 10 usually is enough for testing @@ -51,13 +30,13 @@ func DefaultTestConfig() *TestConfig { accounts[i] = tmrand.Str(9) } - return &TestConfig{ - ConsensusParams: conCfg, - Tendermint: tnCfg, - App: appCfg, - Accounts: accounts, - SuppressLogs: true, - } + cfg.TmConfig.Consensus.TimeoutCommit = time.Millisecond * 200 + + cfg = cfg. + WithAccounts(accounts). + WithSupressLogs(true) + + return cfg } // StartTestNode simply starts Tendermint and Celestia App tandem with default testing @@ -67,40 +46,13 @@ func StartTestNode(t *testing.T) testnode.Context { } // StartTestNodeWithConfig starts Tendermint and Celestia App tandem with custom configuration. -func StartTestNodeWithConfig(t *testing.T, cfg *TestConfig) testnode.Context { - state, kr, err := testnode.DefaultGenesisState(cfg.Accounts...) 
- require.NoError(t, err) - - tmNode, app, cctx, err := testnode.New( - t, - cfg.ConsensusParams, - cfg.Tendermint, - cfg.SuppressLogs, - state, - kr, - "private", - ) - require.NoError(t, err) - - cctx, cleanupCoreNode, err := testnode.StartNode(tmNode, cctx) - require.NoError(t, err) - t.Cleanup(func() { - err := cleanupCoreNode() - require.NoError(t, err) - }) - - cctx, cleanupGRPCServer, err := StartGRPCServer(app, cfg.App, cctx) - require.NoError(t, err) - t.Cleanup(func() { - err := cleanupGRPCServer() - require.NoError(t, err) - }) - +func StartTestNodeWithConfig(t *testing.T, cfg *testnode.Config) testnode.Context { + cctx, _, _ := testnode.NewNetwork(t, cfg) // we want to test over remote http client, // so we are as close to the real environment as possible // however, it might be useful to use local tendermint client // if you need to debug something inside of it - ip, port, err := getEndpoint(cfg.Tendermint) + ip, port, err := getEndpoint(cfg.TmConfig) require.NoError(t, err) client, err := NewRemote(ip, port) require.NoError(t, err) @@ -116,18 +68,6 @@ func StartTestNodeWithConfig(t *testing.T, cfg *TestConfig) testnode.Context { return cctx } -func getFreePort() int { - a, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err == nil { - var l *net.TCPListener - if l, err = net.ListenTCP("tcp", a); err == nil { - defer l.Close() - return l.Addr().(*net.TCPAddr).Port - } - } - panic("while getting free port: " + err.Error()) -} - func getEndpoint(cfg *tmconfig.Config) (string, string, error) { url, err := url.Parse(cfg.RPC.ListenAddress) if err != nil { diff --git a/das/coordinator_test.go b/das/coordinator_test.go index 188cb0d222..55ed01dd4e 100644 --- a/das/coordinator_test.go +++ b/das/coordinator_test.go @@ -366,7 +366,7 @@ func (m *mockSampler) sample(ctx context.Context, h *header.ExtendedHeader) erro m.lock.Lock() defer m.lock.Unlock() - height := uint64(h.Height()) + height := h.Height() m.done[height]++ if len(m.done) > int(m.NetworkHead-m.SampleFrom) && !m.isFinished { @@ -503,7 +503,7 @@ func (o *checkOrder) middleWare(out sampleFn) sampleFn { if len(o.queue) > 0 { // check last item in queue to be same as input - if o.queue[0] != uint64(h.Height()) { + if o.queue[0] != h.Height() { defer o.lock.Unlock() return fmt.Errorf("expected height: %v,got: %v", o.queue[0], h.Height()) } @@ -573,7 +573,7 @@ func (l *lock) releaseAll(except ...uint64) { func (l *lock) middleWare(out sampleFn) sampleFn { return func(ctx context.Context, h *header.ExtendedHeader) error { l.m.Lock() - ch, blocked := l.blockList[uint64(h.Height())] + ch, blocked := l.blockList[h.Height()] l.m.Unlock() if !blocked { return out(ctx, h) @@ -589,7 +589,7 @@ func (l *lock) middleWare(out sampleFn) sampleFn { } func onceMiddleWare(out sampleFn) sampleFn { - db := make(map[int64]int) + db := make(map[uint64]int) m := sync.Mutex{} return func(ctx context.Context, h *header.ExtendedHeader) error { m.Lock() diff --git a/das/daser.go b/das/daser.go index d4ad0ee641..9d3e43a91b 100644 --- a/das/daser.go +++ b/das/daser.go @@ -25,7 +25,7 @@ type DASer struct { params Parameters da share.Availability - bcast fraud.Broadcaster + bcast fraud.Broadcaster[*header.ExtendedHeader] hsub libhead.Subscriber[*header.ExtendedHeader] // listens for new headers in the network getter libhead.Getter[*header.ExtendedHeader] // retrieves past headers @@ -47,7 +47,7 @@ func NewDASer( hsub libhead.Subscriber[*header.ExtendedHeader], getter libhead.Getter[*header.ExtendedHeader], dstore datastore.Datastore, - bcast 
fraud.Broadcaster, + bcast fraud.Broadcaster[*header.ExtendedHeader], shrexBroadcast shrexsub.BroadcastFn, options ...Option, ) (*DASer, error) { @@ -99,7 +99,7 @@ func (d *DASer) Start(ctx context.Context) error { // attempt to get head info. No need to handle error, later DASer // will be able to find new head from subscriber after it is started if h, err := d.getter.Head(ctx); err == nil { - cp.NetworkHead = uint64(h.Height()) + cp.NetworkHead = h.Height() } } log.Info("starting DASer from checkpoint: ", cp.String()) @@ -152,7 +152,7 @@ func (d *DASer) sample(ctx context.Context, h *header.ExtendedHeader) error { var byzantineErr *byzantine.ErrByzantine if errors.As(err, &byzantineErr) { log.Warn("Propagating proof...") - sendErr := d.bcast.Broadcast(ctx, byzantine.CreateBadEncodingProof(h.Hash(), uint64(h.Height()), byzantineErr)) + sendErr := d.bcast.Broadcast(ctx, byzantine.CreateBadEncodingProof(h.Hash(), h.Height(), byzantineErr)) if sendErr != nil { log.Errorw("fraud proof propagating failed", "err", sendErr) } diff --git a/das/daser_test.go b/das/daser_test.go index 7398310a6b..68f6e01ef2 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -159,21 +159,37 @@ func TestDASer_stopsAfter_BEFP(t *testing.T) { mockGet, sub, _ := createDASerSubcomponents(t, bServ, 15, 15) // create fraud service and break one header - getter := func(ctx context.Context, height uint64) (libhead.Header, error) { + getter := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return mockGet.GetByHeight(ctx, height) } - f := fraudserv.NewProofService(ps, net.Hosts()[0], getter, ds, false, "private") - require.NoError(t, f.Start(ctx)) + unmarshaler := fraud.MultiUnmarshaler[*header.ExtendedHeader]{ + Unmarshalers: map[fraud.ProofType]func([]byte) (fraud.Proof[*header.ExtendedHeader], error){ + byzantine.BadEncoding: func(data []byte) (fraud.Proof[*header.ExtendedHeader], error) { + befp := &byzantine.BadEncodingProof{} + return befp, befp.UnmarshalBinary(data) + }, + }, + } + + fserv := fraudserv.NewProofService[*header.ExtendedHeader](ps, + net.Hosts()[0], + getter, + unmarshaler, + ds, + false, + "private", + ) + require.NoError(t, fserv.Start(ctx)) mockGet.headers[1], _ = headertest.CreateFraudExtHeader(t, mockGet.headers[1], bServ) newCtx := context.Background() // create and start DASer - daser, err := NewDASer(avail, sub, mockGet, ds, f, newBroadcastMock(1)) + daser, err := NewDASer(avail, sub, mockGet, ds, fserv, newBroadcastMock(1)) require.NoError(t, err) resultCh := make(chan error) - go fraud.OnProof(newCtx, f, byzantine.BadEncoding, - func(fraud.Proof) { + go fraud.OnProof[*header.ExtendedHeader](newCtx, fserv, byzantine.BadEncoding, + func(fraud.Proof[*header.ExtendedHeader]) { resultCh <- daser.Stop(newCtx) }) @@ -210,10 +226,10 @@ func TestDASerSampleTimeout(t *testing.T) { ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) sub := new(headertest.Subscriber) - f := new(fraudtest.DummyService) + fserv := &fraudtest.DummyService[*header.ExtendedHeader]{} // create and start DASer - daser, err := NewDASer(avail, sub, getter, ds, f, newBroadcastMock(1), WithSampleTimeout(1)) + daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1), WithSampleTimeout(1)) require.NoError(t, err) require.NoError(t, daser.Start(ctx)) @@ -235,9 +251,9 @@ func createDASerSubcomponents( bServ blockservice.BlockService, numGetter, numSub int, -) (*mockGetter, *headertest.Subscriber, *fraudtest.DummyService) { +) (*mockGetter, *headertest.Subscriber, 
*fraudtest.DummyService[*header.ExtendedHeader]) { mockGet, sub := createMockGetterAndSub(t, bServ, numGetter, numSub) - fraud := new(fraudtest.DummyService) + fraud := &fraudtest.DummyService[*header.ExtendedHeader]{} return mockGet, sub, fraud } @@ -313,7 +329,10 @@ func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockServi m.head = int64(startHeight + endHeight) } -func (m *mockGetter) Head(context.Context) (*header.ExtendedHeader, error) { +func (m *mockGetter) Head( + context.Context, + ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { return m.headers[m.head], nil } @@ -354,7 +373,10 @@ func (m benchGetterStub) GetByHeight(context.Context, uint64) (*header.ExtendedH type getterStub struct{} -func (m getterStub) Head(context.Context) (*header.ExtendedHeader, error) { +func (m getterStub) Head( + context.Context, + ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { return &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}}, nil } diff --git a/das/metrics.go b/das/metrics.go index 1dcf5c8165..42b472d909 100644 --- a/das/metrics.go +++ b/das/metrics.go @@ -6,11 +6,9 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/metric" "github.com/celestiaorg/celestia-node/header" ) @@ -22,73 +20,70 @@ const ( ) var ( - meter = global.MeterProvider().Meter("das") + meter = otel.Meter("das") ) type metrics struct { - sampled syncint64.Counter - sampleTime syncfloat64.Histogram - getHeaderTime syncfloat64.Histogram - newHead syncint64.Counter + sampled metric.Int64Counter + sampleTime metric.Float64Histogram + getHeaderTime metric.Float64Histogram + newHead metric.Int64Counter lastSampledTS uint64 } func (d *DASer) InitMetrics() error { - sampled, err := meter.SyncInt64().Counter("das_sampled_headers_counter", - instrument.WithDescription("sampled headers counter")) + sampled, err := meter.Int64Counter("das_sampled_headers_counter", + metric.WithDescription("sampled headers counter")) if err != nil { return err } - sampleTime, err := meter.SyncFloat64().Histogram("das_sample_time_hist", - instrument.WithDescription("duration of sampling a single header")) + sampleTime, err := meter.Float64Histogram("das_sample_time_hist", + metric.WithDescription("duration of sampling a single header")) if err != nil { return err } - getHeaderTime, err := meter.SyncFloat64().Histogram("das_get_header_time_hist", - instrument.WithDescription("duration of getting header from header store")) + getHeaderTime, err := meter.Float64Histogram("das_get_header_time_hist", + metric.WithDescription("duration of getting header from header store")) if err != nil { return err } - newHead, err := meter.SyncInt64().Counter("das_head_updated_counter", - instrument.WithDescription("amount of times DAS'er advanced network head")) + newHead, err := meter.Int64Counter("das_head_updated_counter", + metric.WithDescription("amount of times DAS'er advanced network head")) if err != nil { return err } - lastSampledTS, err := meter.AsyncInt64().Gauge("das_latest_sampled_ts", - instrument.WithDescription("latest sampled timestamp")) + lastSampledTS, err := meter.Int64ObservableGauge("das_latest_sampled_ts", + metric.WithDescription("latest sampled timestamp")) if err != 
nil { return err } - busyWorkers, err := meter.AsyncInt64().Gauge("das_busy_workers_amount", - instrument.WithDescription("number of active parallel workers in DAS'er")) + busyWorkers, err := meter.Int64ObservableGauge("das_busy_workers_amount", + metric.WithDescription("number of active parallel workers in DAS'er")) if err != nil { return err } - networkHead, err := meter.AsyncInt64().Gauge("das_network_head", - instrument.WithDescription("most recent network head")) + networkHead, err := meter.Int64ObservableGauge("das_network_head", + metric.WithDescription("most recent network head")) if err != nil { return err } - sampledChainHead, err := meter.AsyncInt64().Gauge("das_sampled_chain_head", - instrument.WithDescription("height of the sampled chain - all previous headers have been successfully sampled")) + sampledChainHead, err := meter.Int64ObservableGauge("das_sampled_chain_head", + metric.WithDescription("height of the sampled chain - all previous headers have been successfully sampled")) if err != nil { return err } - totalSampled, err := meter. - AsyncInt64(). - Gauge( - "das_total_sampled_headers", - instrument.WithDescription("total sampled headers gauge"), - ) + totalSampled, err := meter.Int64ObservableGauge("das_total_sampled_headers", + metric.WithDescription("total sampled headers gauge"), + ) if err != nil { return err } @@ -100,36 +95,38 @@ func (d *DASer) InitMetrics() error { newHead: newHead, } - err = meter.RegisterCallback( - []instrument.Asynchronous{ - lastSampledTS, - busyWorkers, - networkHead, - sampledChainHead, - totalSampled, - }, - func(ctx context.Context) { - stats, err := d.sampler.stats(ctx) - if err != nil { - log.Errorf("observing stats: %s", err.Error()) - } - - for jobType, amount := range stats.workersByJobType() { - busyWorkers.Observe(ctx, amount, - attribute.String(jobTypeLabel, string(jobType))) - } - - networkHead.Observe(ctx, int64(stats.NetworkHead)) - sampledChainHead.Observe(ctx, int64(stats.SampledChainHead)) - - if ts := atomic.LoadUint64(&d.sampler.metrics.lastSampledTS); ts != 0 { - lastSampledTS.Observe(ctx, int64(ts)) - } - - totalSampled.Observe(ctx, int64(stats.totalSampled())) - }, - ) + callback := func(ctx context.Context, observer metric.Observer) error { + stats, err := d.sampler.stats(ctx) + if err != nil { + log.Errorf("observing stats: %s", err.Error()) + return err + } + + for jobType, amount := range stats.workersByJobType() { + observer.ObserveInt64(busyWorkers, amount, + metric.WithAttributes( + attribute.String(jobTypeLabel, string(jobType)), + )) + } + + observer.ObserveInt64(networkHead, int64(stats.NetworkHead)) + observer.ObserveInt64(sampledChainHead, int64(stats.SampledChainHead)) + + if ts := atomic.LoadUint64(&d.sampler.metrics.lastSampledTS); ts != 0 { + observer.ObserveInt64(lastSampledTS, int64(ts)) + } + observer.ObserveInt64(totalSampled, int64(stats.totalSampled())) + return nil + } + + _, err = meter.RegisterCallback(callback, + lastSampledTS, + busyWorkers, + networkHead, + sampledChainHead, + totalSampled, + ) if err != nil { return fmt.Errorf("registering metrics callback: %w", err) } @@ -153,16 +150,18 @@ func (m *metrics) observeSample( ctx = context.Background() } m.sampleTime.Record(ctx, sampleTime.Seconds(), - attribute.Bool(failedLabel, err != nil), - attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)), - attribute.String(jobTypeLabel, string(jobType)), - ) + metric.WithAttributes( + attribute.Bool(failedLabel, err != nil), + attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)), + 
attribute.String(jobTypeLabel, string(jobType)), + )) m.sampled.Add(ctx, 1, - attribute.Bool(failedLabel, err != nil), - attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)), - attribute.String(jobTypeLabel, string(jobType)), - ) + metric.WithAttributes( + attribute.Bool(failedLabel, err != nil), + attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)), + attribute.String(jobTypeLabel, string(jobType)), + )) atomic.StoreUint64(&m.lastSampledTS, uint64(time.Now().UTC().Unix())) } diff --git a/das/state.go b/das/state.go index 6af0b7d8d8..bd3a018a40 100644 --- a/das/state.go +++ b/das/state.go @@ -132,30 +132,29 @@ func (s *coordinatorState) handleRetryResult(res result) { } } -func (s *coordinatorState) isNewHead(newHead int64) bool { +func (s *coordinatorState) isNewHead(newHead uint64) bool { // seen this header before - if uint64(newHead) <= s.networkHead { + if newHead <= s.networkHead { log.Warnf("received head height: %v, which is lower or the same as previously known: %v", newHead, s.networkHead) return false } return true } -func (s *coordinatorState) updateHead(newHead int64) { +func (s *coordinatorState) updateHead(newHead uint64) { if s.networkHead == s.sampleFrom { log.Infow("found first header, starting sampling") } - s.networkHead = uint64(newHead) + s.networkHead = newHead log.Debugw("updated head", "from_height", s.networkHead, "to_height", newHead) s.checkDone() } // recentJob creates a job to process a recent header. func (s *coordinatorState) recentJob(header *header.ExtendedHeader) job { - height := uint64(header.Height()) // move next, to prevent catchup job from processing same height - if s.next == height { + if s.next == header.Height() { s.next++ } s.nextJobID++ @@ -163,8 +162,8 @@ func (s *coordinatorState) recentJob(header *header.ExtendedHeader) job { id: s.nextJobID, jobType: recentJob, header: header, - from: height, - to: height, + from: header.Height(), + to: header.Height(), } } diff --git a/das/worker.go b/das/worker.go index 746324ec48..f2e8c4d821 100644 --- a/das/worker.go +++ b/das/worker.go @@ -135,7 +135,7 @@ func (w *worker) sample(ctx context.Context, timeout time.Duration, height uint6 if w.state.job.jobType == recentJob { err = w.broadcast(ctx, shrexsub.Notification{ DataHash: h.DataHash.Bytes(), - Height: uint64(h.Height()), + Height: h.Height(), }) if err != nil { log.Warn("failed to broadcast availability message", diff --git a/go.mod b/go.mod index 9628a0c205..846f4316e6 100644 --- a/go.mod +++ b/go.mod @@ -1,25 +1,27 @@ module github.com/celestiaorg/celestia-node -go 1.20 +go 1.21 + +toolchain go1.21.0 replace github.com/ipfs/go-verifcid => github.com/celestiaorg/go-verifcid v0.0.1-lazypatch require ( - cosmossdk.io/errors v1.0.0-beta.7 - cosmossdk.io/math v1.0.0-beta.3 - github.com/BurntSushi/toml v1.3.0 - github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 - github.com/benbjohnson/clock v1.3.0 - github.com/celestiaorg/celestia-app v1.0.0-rc2 - github.com/celestiaorg/go-fraud v0.1.0 - github.com/celestiaorg/go-header v0.2.7 + cosmossdk.io/errors v1.0.0 + cosmossdk.io/math v1.1.1 + github.com/BurntSushi/toml v1.3.2 + github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b + github.com/benbjohnson/clock v1.3.5 + github.com/celestiaorg/celestia-app v1.0.0-rc12 + github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 + github.com/celestiaorg/go-fraud v0.2.0 + github.com/celestiaorg/go-header v0.3.0 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/nmt v0.16.0 - 
github.com/celestiaorg/rsmt2d v0.9.0 - github.com/cosmos/cosmos-sdk v0.46.11 + github.com/celestiaorg/nmt v0.18.1 + github.com/celestiaorg/rsmt2d v0.11.0 + github.com/cosmos/cosmos-sdk v0.46.14 github.com/cosmos/cosmos-sdk/api v0.1.0 github.com/cristalhq/jwt v1.2.0 - github.com/dgraph-io/badger/v2 v2.2007.4 github.com/etclabscore/go-openrpc-reflect v0.0.37 github.com/filecoin-project/dagstore v0.5.6 github.com/filecoin-project/go-jsonrpc v0.3.1 @@ -27,70 +29,68 @@ require ( github.com/gogo/protobuf v1.3.3 github.com/golang/mock v1.6.0 github.com/gorilla/mux v1.8.0 - github.com/hashicorp/go-retryablehttp v0.7.2 - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d + github.com/hashicorp/go-retryablehttp v0.7.4 + github.com/hashicorp/golang-lru v1.0.2 github.com/imdario/mergo v0.3.16 - github.com/ipfs/go-blockservice v0.5.0 - github.com/ipfs/go-cid v0.3.2 + github.com/ipfs/boxo v0.11.0 + github.com/ipfs/go-block-format v0.1.2 + github.com/ipfs/go-blockservice v0.5.1 // down 1 version, 0.5.2 is marked as deprecated and raises alerts + github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ipfs-blockstore v1.2.0 - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 - github.com/ipfs/go-ipfs-routing v0.3.0 github.com/ipfs/go-ipld-cbor v0.0.6 - github.com/ipfs/go-ipld-format v0.4.0 - github.com/ipfs/go-libipfs v0.6.0 + github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 - github.com/ipfs/go-merkledag v0.10.0 - github.com/ipld/go-car v0.6.0 - github.com/libp2p/go-libp2p v0.26.3 - github.com/libp2p/go-libp2p-kad-dht v0.21.0 + github.com/ipfs/go-merkledag v0.11.0 + github.com/ipld/go-car v0.6.2 + github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p-kad-dht v0.25.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/libp2p/go-libp2p-routing-helpers v0.6.1 - github.com/minio/sha256-simd v1.0.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-multiaddr v0.11.0 github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6 + github.com/multiformats/go-multihash v0.2.3 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 - github.com/prometheus/client_golang v1.14.0 - github.com/pyroscope-io/client v0.7.1 + github.com/prometheus/client_golang v1.16.0 + github.com/pyroscope-io/client v0.7.2 github.com/pyroscope-io/otel-profiling-go v0.4.0 - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 - github.com/tendermint/tendermint v0.34.24 - go.opentelemetry.io/otel v1.13.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.34.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 - go.opentelemetry.io/otel/metric v0.34.0 - go.opentelemetry.io/otel/sdk v1.11.2 - go.opentelemetry.io/otel/sdk/metric v0.34.0 - go.opentelemetry.io/otel/trace v1.13.0 - go.opentelemetry.io/proto/otlp v0.19.0 - go.uber.org/fx v1.19.3 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.9.0 - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 - golang.org/x/sync v0.1.0 - golang.org/x/text v0.9.0 - google.golang.org/grpc v1.53.0 - google.golang.org/protobuf 
v1.28.2-0.20220831092852-f930b1dc76e8 + github.com/tendermint/tendermint v0.34.28 + go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.39.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 + go.opentelemetry.io/otel/metric v1.16.0 + go.opentelemetry.io/otel/sdk v1.16.0 + go.opentelemetry.io/otel/sdk/metric v0.39.0 + go.opentelemetry.io/otel/trace v1.16.0 + go.opentelemetry.io/proto/otlp v1.0.0 + go.uber.org/fx v1.20.0 + go.uber.org/zap v1.25.0 + golang.org/x/crypto v0.12.0 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/sync v0.3.0 + golang.org/x/text v0.12.0 + google.golang.org/grpc v1.57.0 + google.golang.org/protobuf v1.31.0 ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go v0.110.6 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.8.0 // indirect - cloud.google.com/go/storage v1.27.0 // indirect + cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/storage v1.30.1 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect - github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect @@ -103,23 +103,23 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect github.com/celestiaorg/quantum-gravity-bridge v1.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chzyer/readline v1.5.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect github.com/cometbft/cometbft-db v0.7.0 // indirect github.com/confio/ics23/go v0.9.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-proto v1.0.0-alpha8 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect - github.com/cosmos/gogoproto v1.4.2 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect github.com/cosmos/gorocksdb v1.2.0 // indirect - github.com/cosmos/iavl v0.19.5 // indirect - github.com/cosmos/ibc-go/v6 v6.1.1 // indirect + github.com/cosmos/iavl v0.19.6 // indirect + github.com/cosmos/ibc-go/v6 v6.2.0 // indirect github.com/cosmos/ledger-cosmos-go v0.12.2 // indirect github.com/creachadair/taskgroup v0.3.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect @@ -127,10 +127,12 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // 
indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/badger/v4 v4.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect @@ -146,7 +148,7 @@ require ( github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -154,27 +156,29 @@ require ( github.com/go-openapi/spec v0.19.11 // indirect github.com/go-openapi/swag v0.19.11 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/gateway v1.1.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/orderedcode v0.0.1 // indirect - github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/gtank/merlin v0.1.1 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect @@ -185,102 +189,102 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hdevalence/ed25519consensus 
v0.0.0-20220222234857-c00d1f31bab3 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect - github.com/huin/goupnp v1.0.3 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/influxdb-client-go/v2 v2.12.2 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-bitswap v0.12.0 // indirect - github.com/ipfs/go-block-format v0.1.1 // indirect + github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.0 + github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect + github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-legacy v0.1.1 // indirect - github.com/ipfs/go-ipns v0.3.0 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect + github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/go-verifcid v0.0.2 // indirect - github.com/ipld/go-car/v2 v2.5.1 // indirect + github.com/ipld/go-car/v2 v2.11.0 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect - github.com/ipld/go-ipld-prime v0.20.0 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect - github.com/klauspost/reedsolomon v1.11.1 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect - github.com/lib/pq v1.10.6 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/reedsolomon v1.11.8 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/magiconair/properties v1.8.6 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect 
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.55 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/highwayhash v1.0.2 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.8.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.5.1 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/pyroscope-io/godeltaprof v0.1.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/pyroscope-io/godeltaprof v0.1.2 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.2.1 // indirect - github.com/quic-go/qtls-go1-20 v0.1.1 // indirect - github.com/quic-go/quic-go v0.33.0 // indirect - github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/quic-go/qtls-go1-20 v0.3.2 // indirect + github.com/quic-go/quic-go v0.37.6 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/rakyll/statik v0.1.7 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/regen-network/cosmos-proto v0.3.1 // indirect - github.com/rivo/uniseg v0.4.2 // indirect + github.com/rivo/uniseg v0.4.4 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/rs/zerolog v1.27.0 // indirect + github.com/rs/zerolog v1.29.1 // indirect 
github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/shirou/gopsutil v3.21.6+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -289,7 +293,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.14.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tendermint/tm-db v0.6.7 // indirect @@ -298,40 +302,44 @@ require ( github.com/tklauser/numcpus v0.4.0 // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/zondax/hid v0.9.1 // indirect github.com/zondax/ledger-go v0.14.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/dig v1.16.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.103.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect + google.golang.org/api v0.126.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect + lukechampine.com/blake3 v1.2.1 // indirect nhooyr.io/websocket v1.8.7 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) replace ( - github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11 - github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230413141458-735ab09a15d6 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk 
v1.17.0-sdk-v0.46.14 + github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27 + // broken goleveldb needs to be replaced for the cosmos-sdk and celestia-app + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28 ) diff --git a/go.sum b/go.sum index bf659652b4..09bd8ab26e 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= @@ -75,8 +75,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= @@ -116,13 +116,12 @@ cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y97 cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= 
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= @@ -179,8 +178,9 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= @@ -193,10 +193,10 @@ cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuW cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -cosmossdk.io/errors v1.0.0-beta.7 h1:gypHW76pTQGVnHKo6QBkb4yFOJjC+sUGRc5Al3Odj1w= -cosmossdk.io/errors v1.0.0-beta.7/go.mod h1:mz6FQMJRku4bY7aqS/Gwfcmr/ue91roMEKAmDUDpBfE= -cosmossdk.io/math v1.0.0-beta.3 h1:TbZxSopz2LqjJ7aXYfn7nJSb8vNaBklW6BLpcei1qwM= -cosmossdk.io/math v1.0.0-beta.3/go.mod h1:3LYasri3Zna4XpbrTNdKsWmD5fHHkaNAod/mNT9XdE4= +cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= +cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= +cosmossdk.io/math v1.1.1 h1:Eqx44E6fSvG055Z6VNiCLWA9fra0JSyP0kQX7VvNNfk= +cosmossdk.io/math v1.1.1/go.mod h1:uFRkSZDz38KjWjm6jN+/sI8tJWQxbGwxcjOTzapWSpE= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -219,24 +219,29 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3 github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.0 h1:Ws8e5YmnrGEHzZEzg0YvK/7COGYtTC5PbaH9oSSbgfA= -github.com/BurntSushi/toml v1.3.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -248,6 +253,7 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0 
h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= @@ -256,16 +262,19 @@ github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSa github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 h1:T3+cD5fYvuH36h7EZq+TDpm+d8a6FSD4pQsbmuGGQ8o= github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= +github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b h1:doCpXjVwui6HUN+xgNsNS3SZ0/jUZ68Eb+mJRNOZfog= +github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -295,8 +304,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbE github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -311,6 +321,7 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/btcsuite/btcd 
v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= @@ -319,9 +330,11 @@ github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= @@ -337,39 +350,43 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= +github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.0.0-rc2 h1:/u7eespYtBpQtBSz3P8/rKfz9rW7QOxkH8ebh8T4VxI= -github.com/celestiaorg/celestia-app v1.0.0-rc2/go.mod h1:uiTWKTtRpVwvSiFDl2zausrU1ZBHBWgk7z52pfzJqJU= -github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27 h1:nmr9O5BflgNR1aWehs1ZFw4obA//M/+g+SrSMK9sOBA= -github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27/go.mod h1:GVo91Wifg9KL/nFx9nPkpl0UIFdvvs4fhnly9GhGxZU= -github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11 h1:Rd5EvJx1nG3KurBspVN51RVmvif0Lp2UVURbG2ad3Cs= -github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11/go.mod h1:xCG6OUkJy5KUMEg20Zk010lra9XjkmKS3+bk0wp7bd8= -github.com/celestiaorg/dagstore 
v0.0.0-20230413141458-735ab09a15d6 h1:/yCwMCoOPcYCiG18u8/1pv5eXF04xczoQO3sR0bKsgM= -github.com/celestiaorg/dagstore v0.0.0-20230413141458-735ab09a15d6/go.mod h1:ta/DlqIH10bvhwqJIw51Nq3QU4XVMp6pz3f0Deve9fM= -github.com/celestiaorg/go-fraud v0.1.0 h1:v6mZvlmf2J5ELZfPnrtmmOvKbaYIUs/erDWPO8NbZyY= -github.com/celestiaorg/go-fraud v0.1.0/go.mod h1:yoNM35cKMAkt5Mi/Qx3Wi9bnPilLi8n6RpHZVglTUDs= -github.com/celestiaorg/go-header v0.2.7 h1:r0X9Dl7lqBkQpwG3ekQHC61n/QdwO6epuIxDkQ4YX4o= -github.com/celestiaorg/go-header v0.2.7/go.mod h1:i9OpY70+PJ1xPw1IgMfF0Pk6vBD6VWPmjY3bgubJBcU= +github.com/celestiaorg/celestia-app v1.0.0-rc12 h1:ko9hPD4oz1UTS4ZqzikGVQ0wXi5+4kEhDb7decx5Ehs= +github.com/celestiaorg/celestia-app v1.0.0-rc12/go.mod h1:vXvKEudUpdJCvUr79qVKKJ0Xo7ofsuU80+Hs9aKGjvU= +github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28 h1:eXS3v26nob8Xs2+flKHVxcTzhzQW44KgTcooR3OxnK4= +github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28/go.mod h1:J/GsBjoTZaFz71VeyrLZbG8rV+Rzi6oFEUZUipQ97hQ= +github.com/celestiaorg/cosmos-sdk v1.17.0-sdk-v0.46.14 h1:PckXGxLJjXv97VO3xS8NPHN5oO83X5nvJLbc/4s8jUM= +github.com/celestiaorg/cosmos-sdk v1.17.0-sdk-v0.46.14/go.mod h1:70Go8qNy7YAb1PUcHCChRHNX2ke7c9jgUIEklUX+Mac= +github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= +github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= +github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 h1:MJgXvhJP1Au8rXTvMMlBXodu9jplEK1DxiLtMnEphOs= +github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5/go.mod h1:r6xB3nvGotmlTACpAr3SunxtoXeesbqb57elgMJqflY= +github.com/celestiaorg/go-fraud v0.2.0 h1:aaq2JiW0gTnhEdac3l51UCqSyJ4+VjFGTTpN83V4q7I= +github.com/celestiaorg/go-fraud v0.2.0/go.mod h1:lNY1i4K6kUeeE60Z2VK8WXd+qXb8KRzfBhvwPkK6aUc= +github.com/celestiaorg/go-header v0.3.0 h1:9fhxSgldPiWWq3yd9u7oSk5vYqaLV1JkeTnJdGcisFo= +github.com/celestiaorg/go-header v0.3.0/go.mod h1:H8xhnDLDLbkpwmWPhCaZyTnIV3dlVxBHPnxNXS2Qu6c= github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO3jHV7wKHvWD/Irao= github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/go-verifcid v0.0.1-lazypatch h1:9TSe3w1cmJmbWlweCwCTIZkan7jV8M+KwglXpdD+UG8= github.com/celestiaorg/go-verifcid v0.0.1-lazypatch/go.mod h1:kXPYu0XqTNUKWA1h3M95UHjUqBzDwXVVt/RXZDjKJmQ= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= -github.com/celestiaorg/nmt v0.16.0 h1:4CX6d1Uwf1C+tGcAWskPve0HCDTnI4Ey8ffjiDwcGH0= -github.com/celestiaorg/nmt v0.16.0/go.mod h1:GfwIvQPhUakn1modWxJ+rv8dUjJzuXg5H+MLFM1o7nY= +github.com/celestiaorg/nmt v0.18.1 h1:zU3apzW4y0fs0ilQA74XnEYW8FvRv0CUK2LXK66L3rA= +github.com/celestiaorg/nmt v0.18.1/go.mod h1:0l8q6UYRju1xNrxtvV6NwPdW3lfsN6KuZ0htRnModdc= github.com/celestiaorg/quantum-gravity-bridge v1.3.0 h1:9zPIp7w1FWfkPnn16y3S4FpFLnQtS7rm81CUVcHEts0= github.com/celestiaorg/quantum-gravity-bridge v1.3.0/go.mod h1:6WOajINTDEUXpSj5UZzod16UZ96ZVB/rFNKyM+Mt1gI= -github.com/celestiaorg/rsmt2d v0.9.0 h1:kon78I748ZqjNzI8OAqPN+2EImuZuanj/6gTh8brX3o= -github.com/celestiaorg/rsmt2d v0.9.0/go.mod h1:E06nDxfoeBDltWRvTR9dLviiUZI5/6mLXAuhSJzz3Iw= +github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= 
+github.com/celestiaorg/rsmt2d v0.11.0/go.mod h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -382,14 +399,17 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.0 h1:+eqR0HfOetur4tgnC8ftU5imRnhi4te+BadWS95c5AM= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23 h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= @@ -410,9 +430,13 @@ github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b80 github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b 
h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= +github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI= @@ -428,9 +452,11 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -440,7 +466,7 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -454,14 +480,14 @@ github.com/cosmos/cosmos-sdk/api v0.1.0/go.mod h1:CupqQBskAOiTXO1XDZ/wrtWzN/wTxU github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw= 
-github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= -github.com/cosmos/iavl v0.19.5 h1:rGA3hOrgNxgRM5wYcSCxgQBap7fW82WZgY78V9po/iY= -github.com/cosmos/iavl v0.19.5/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= -github.com/cosmos/ibc-go/v6 v6.1.1 h1:oqqMNyjj6SLQF8rvgCaDGwfdITEIsbhs8F77/8xvRIo= -github.com/cosmos/ibc-go/v6 v6.1.1/go.mod h1:NL17FpFAaWjRFVb1T7LUKuOoMSsATPpu+Icc4zL5/Ik= +github.com/cosmos/iavl v0.19.6 h1:XY78yEeNPrEYyNCKlqr9chrwoeSDJ0bV2VjocTk//OU= +github.com/cosmos/iavl v0.19.6/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= +github.com/cosmos/ibc-go/v6 v6.2.0 h1:HKS5WNxQrlmjowHb73J9LqlNJfvTnvkbhXZ9QzNTU7Q= +github.com/cosmos/ibc-go/v6 v6.2.0/go.mod h1:+S3sxcNwOhgraYDJAhIFDg5ipXHaUnJrg7tOQqGyWlc= github.com/cosmos/ledger-cosmos-go v0.12.2 h1:/XYaBlE2BJxtvpkHiBm97gFGSGmYGKunKyF3nNqAXZA= github.com/cosmos/ledger-cosmos-go v0.12.2/go.mod h1:ZcqYgnfNJ6lAXe4HPtWgarNEY+B74i+2/8MhZw4ziiI= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -470,6 +496,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -492,11 +520,13 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod 
h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= @@ -508,14 +538,15 @@ github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhY github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/badger/v4 v4.1.0 h1:E38jc0f+RATYrycSUf9LMv/t47XAy+3CApyYSq4APOQ= +github.com/dgraph-io/badger/v4 v4.1.0/go.mod h1:P50u28d39ibBRmIJuQC/NSdBOg46HnHw7al2SW5QRHg= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -528,6 +559,7 @@ github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/ github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -566,8 +598,11 @@ github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= 
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -578,22 +613,30 @@ github.com/filecoin-project/go-jsonrpc v0.3.1/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8w github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= github.com/gammazero/deque v0.2.0/go.mod 
h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= @@ -603,11 +646,13 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= +github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= @@ -633,8 +678,9 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -664,21 +710,26 @@ github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD87 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 
h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -686,6 +737,7 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0= github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= @@ -695,8 +747,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -732,10 +784,10 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -746,6 +798,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -768,6 +822,7 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -775,8 +830,9 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= 
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -791,22 +847,28 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= 
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -818,12 +880,13 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -832,6 +895,7 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -848,8 +912,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod 
h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -874,6 +938,7 @@ github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHAC github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -882,8 +947,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= -github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= @@ -899,10 +964,12 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -920,22 +987,24 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/hudl/fargo 
v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= @@ -951,19 +1020,35 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/boxo v0.11.0 h1:urMxhZ3xoF4HssJVD3+0ssGT9pptEfHfbL8DYdoWFlg= +github.com/ipfs/boxo v0.11.0/go.mod 
h1:8IfDmp+FzFGcF4zjAgHMVPpwYw4AjN9ePEzDfkaYJ1w= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= +github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= +github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= -github.com/ipfs/go-bitswap v0.12.0 h1:ClbLaufwv8SRQK0sBhl4wDVqJoZGAGMVxdjQy5CTt6c= -github.com/ipfs/go-bitswap v0.12.0/go.mod h1:Iwjkd6+vaDjVIa6b6ogmZgs+b5U3EkIFEX79kQ4DjnI= +github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= +github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= +github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.1.1 h1:129vSO3zwbsYADcyQWcOYiuCpAqt462SFfqFHdFJhhI= github.com/ipfs/go-block-format v0.1.1/go.mod h1:+McEIT+g52p+zz5xGAABGSOKrzmrdX97bc0USBdWPUs= +github.com/ipfs/go-block-format v0.1.2 h1:GAjkfhVx1f4YTODS6Esrj1wt2HhrtwTnhEr+DyPUaJo= +github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= -github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY= +github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk= github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w= +github.com/ipfs/go-blockservice v0.5.1 h1:9pAtkyKAz/skdHTh0kH8VulzWp+qmSDD0aI17TYP/s0= +github.com/ipfs/go-blockservice v0.5.1/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -972,10 +1057,18 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= 
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -988,57 +1081,91 @@ github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= +github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= -github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= +github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= +github.com/ipfs/go-ipfs-blockstore v1.3.1 
h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= +github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= +github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY= github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= +github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= +github.com/ipfs/go-ipfs-keystore v0.1.0/go.mod h1:LvLw7Qhnb0RlMOfCzK6OmyWxICip6lQ06CCmdbee75U= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod 
h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.3.1/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= +github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= -github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= -github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= +github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= +github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= +github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-libipfs v0.1.0/go.mod h1:qX0d9h+wu53PFtCTXxdXVBakd6ZCvGDdkZUKmdLMLx0= +github.com/ipfs/go-libipfs v0.3.0/go.mod h1:pSUHZ5qPJTAidsxe9bAeHp3KIiw2ODEW2a2kM3v+iXI= +github.com/ipfs/go-libipfs v0.4.0/go.mod h1:XsU2cP9jBhDrXoJDe0WxikB8XcVmD3k2MEZvB3dbYu8= github.com/ipfs/go-libipfs v0.6.0 h1:3FuckAJEm+zdHbHbf6lAyk0QUzc45LsFcGw102oBCZM= github.com/ipfs/go-libipfs v0.6.0/go.mod h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -1056,30 +1183,69 @@ github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72g github.com/ipfs/go-log/v2 v2.5.0/go.mod 
h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= -github.com/ipfs/go-merkledag v0.10.0 h1:IUQhj/kzTZfam4e+LnaEpoiZ9vZF6ldimVlby+6OXL4= +github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0= +github.com/ipfs/go-merkledag v0.9.0/go.mod h1:bPHqkHt5OZ0p1n3iqPeDiw2jIBkjAytRjS3WSBwjq90= github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8= +github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= +github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ= +github.com/ipfs/go-path v0.1.1/go.mod h1:vC8q4AKOtrjJz2NnllIrmr2ZbGlF5fW2OKKyhV9ggb0= +github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= -github.com/ipfs/go-unixfsnode v1.5.1 h1:JcR3t5C2nM1V7PMzhJ/Qmo19NkoFIKweDSZyDx+CjkI= -github.com/ipld/go-car v0.6.0 h1:d5QrGLnHAxiNLHor+DKGrLdqnM0dQJh2whfSXRDq6J0= -github.com/ipld/go-car v0.6.0/go.mod h1:tBrW1XZ3L2XipLxA69RnTVGW3rve6VX4TbaTYkq8aEA= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfs v0.4.3/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= +github.com/ipfs/go-unixfs v0.4.4/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= +github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= +github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= +github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= +github.com/ipfs/go-unixfsnode v1.5.1/go.mod h1:ed79DaG9IEuZITJVQn4U6MZDftv6I3ygUBLPfhEbHvk= +github.com/ipfs/go-unixfsnode v1.5.2/go.mod h1:NlOebRwYx8lMCNMdhAhEspYPBD3obp7TE0LvBqHY+ks= +github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-unixfsnode v1.7.4 h1:iLvKyAVKUYOIAW2t4kDYqsT7VLGj31eXJE2aeqGfbwA= +github.com/ipfs/go-unixfsnode v1.7.4/go.mod 
h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/interface-go-ipfs-core v0.9.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= +github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= +github.com/ipld/go-car v0.5.0/go.mod h1:ppiN5GWpjOZU9PgpAZ9HbZd9ZgSpwPMr48fGRJOWmvE= +github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= +github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= -github.com/ipld/go-car/v2 v2.5.1 h1:U2ux9JS23upEgrJScW8VQuxmE94560kYxj9CQUpcfmk= github.com/ipld/go-car/v2 v2.5.1/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E= +github.com/ipld/go-car/v2 v2.8.0/go.mod h1:a+BnAxUqgr7wcWxW/lI6ctyEQ2v9gjBChPytwFMp2f4= +github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= +github.com/ipld/go-car/v2 v2.11.0 h1:lkAPwbbTFqbdfawgm+bfmFc8PjGC7D12VcaLXPCLNfM= +github.com/ipld/go-car/v2 v2.11.0/go.mod h1:aDszqev0zjtU8l96g4lwXHaU9bzArj56Y7eEN0q/xqA= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-codec-dagpb v1.4.1/go.mod h1:XdXTO/TUD/ra9RcK/NfmwBfr1JpFxM2uRKaB9oe4LxE= +github.com/ipld/go-codec-dagpb v1.5.0/go.mod h1:0yRIutEFD8o1DGVqw4RSHh+BUTlJA9XWldxaaWR/o4g= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= +github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= -github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.18.0/go.mod h1:735yXW548CKrLwVCYXzqx90p5deRJMVVxM9eJ4Qe+qE= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -1095,9 +1261,11 @@ 
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0 github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b h1:izTof8BKh/nE1wrKOrloNA5q4odOarjf+Xpe+4qow98= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -1139,26 +1307,35 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod 
h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/klauspost/reedsolomon v1.11.1 h1:0gCWQXOB8pVe1Y5SGozDA5t2qoVxX3prsV+qHgI/Fik= -github.com/klauspost/reedsolomon v1.11.1/go.mod h1:FXLZzlJIdfqEnQLdUKWNRuMZg747hZ4oYp2Ml60Lb/k= +github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= +github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -1166,6 +1343,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1179,8 +1357,8 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= @@ -1200,32 +1378,44 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ 
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI= -github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM= -github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o= +github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g= +github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= +github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod 
h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= @@ -1251,14 +1441,20 @@ github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQR github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8= github.com/libp2p/go-libp2p-core v0.15.1/go.mod h1:agSaboYM4hzB1cWekgVReqV5M4g5M+2eNNejV+1EEhs= +github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.21.0 h1:J0Yd22VA+sk0CJRGMgtfHvLVIkZDyJ3AJGiljywIw5U= +github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= +github.com/libp2p/go-libp2p-kad-dht v0.25.0 h1:T2SXQ/VlXTQVLChWY/+OyOsmGMRJvB5kiR+eJt7jtvI= +github.com/libp2p/go-libp2p-kad-dht v0.25.0/go.mod h1:P6fz+J+u4tPigvS5J0kxQ1isksqAhmXiS/pNaEw/nFI= +github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= @@ -1267,15 +1463,18 @@ github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxW github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= 
+github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-noise v0.4.0/go.mod h1:BzzY5pyzCYSyJbQy9oD8z5oP2idsafjt4/X42h9DjZU= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= @@ -1294,8 +1493,9 @@ github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-resource-manager v0.2.1/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= -github.com/libp2p/go-libp2p-routing-helpers v0.6.1 h1:tI3rHOf/FDQsxC2pHBaOZiqPJ0MZYyzGAf4V45xla4U= -github.com/libp2p/go-libp2p-routing-helpers v0.6.1/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= +github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.1 h1:kc0kWCZecbBPAiFEHhxfGJZPqjg1g9zV+X+ovR4Tmnc= +github.com/libp2p/go-libp2p-routing-helpers v0.7.1/go.mod h1:cHStPSRC/wgbfpb5jYdMP7zaSmc2wWcb1mkzNr6AR8o= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -1305,6 +1505,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= @@ -1321,24 +1522,30 @@ github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aL 
github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= github.com/libp2p/go-libp2p-testing v0.9.0/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= github.com/libp2p/go-libp2p-testing v0.9.2/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= +github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-tls v0.4.1/go.mod h1:EKCixHEysLNDlLUoKxv+3f/Lp90O2EXNjTr0UQDnrIw= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= +github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= @@ -1353,16 +1560,20 @@ github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3 github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= 
+github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1375,11 +1586,13 @@ github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= @@ -1394,34 +1607,44 @@ github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyH github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-tcp-transport 
v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= github.com/libp2p/go-yamux/v3 v3.1.1/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg= github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lunixbochs/vtclean v1.0.0/go.mod 
h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1433,8 +1656,9 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= @@ -1445,10 +1669,15 @@ github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZE github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI= github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/marten-seemann/webtransport-go v0.1.1/go.mod h1:kBEh5+RSvOA4troP1vyOVBWK4MIMzDICXVrvCPrYcrM= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= @@ -1468,13 +1697,16 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1485,11 +1717,13 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1507,8 +1741,9 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -1564,8 +1799,11 @@ github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9x github.com/multiformats/go-multiaddr v0.4.0/go.mod 
h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= +github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1584,13 +1822,20 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.8.0 h1:evBmgkbSQux+Ds2IgfhkO38Dl2GDtRW8/Rp6YiSHX/Q= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multicodec v0.8.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1599,14 +1844,19 @@ github.com/multiformats/go-multihash 
v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUj github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= -github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6 h1:qLF997Rz0X1WvdcZ2r5CUkLZ2rvdiXwG1JRSrJZEAuE= -github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6/go.mod h1:kaHxr8TfO1cxIR/tYxgZ7e59HraJq8arEQQR8E/YNvI= +github.com/multiformats/go-multihash v0.2.0/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-multistream v0.3.0/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= github.com/multiformats/go-multistream v0.3.1/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4= github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1652,8 +1902,15 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1661,16 +1918,26 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1684,6 +1951,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod 
h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1695,19 +1963,21 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -1718,6 +1988,7 @@ github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUI github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod 
h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= @@ -1734,16 +2005,19 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1757,8 +2031,9 @@ github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1769,25 +2044,29 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pyroscope-io/client v0.7.1 h1:yFRhj3vbgjBxehvxQmedmUWJQ4CAfCHhn+itPsuWsHw= -github.com/pyroscope-io/client v0.7.1/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= -github.com/pyroscope-io/godeltaprof v0.1.0 h1:UBqtjt0yZi4jTxqZmLAs34XG6ycS3vUTlhEUSq4NHLE= -github.com/pyroscope-io/godeltaprof v0.1.0/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= +github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= github.com/pyroscope-io/otel-profiling-go v0.4.0 h1:Hk/rbUqOWoByoWy1tt4r5BX5xoKAvs5drr0511Ki8ic= github.com/pyroscope-io/otel-profiling-go v0.4.0/go.mod h1:MXaofiWU7PgLP7eISUZJYVO4Z8WYMqpkYgeP4XrPLyg= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A= -github.com/quic-go/qtls-go1-19 v0.2.1/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3wSwQk= -github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= -github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= -github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= -github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= +github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= +github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= +github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY= +github.com/quic-go/quic-go v0.37.6/go.mod 
h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= +github.com/quic-go/webtransport-go v0.5.1/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= @@ -1802,28 +2081,30 @@ github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNw github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= -github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= -github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= -github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= +github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= +github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samber/lo v1.36.0/go.mod 
h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= @@ -1864,10 +2145,13 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= @@ -1889,8 +2173,8 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1904,6 +2188,7 @@ github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+eg github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1923,11 +2208,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -1937,21 +2222,25 @@ github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2l github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= +github.com/thoas/go-funk v0.9.1/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w= github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.5/go.mod 
h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= @@ -1964,6 +2253,8 @@ github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqri github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -1978,6 +2269,7 @@ github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= @@ -1985,23 +2277,34 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.11.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod 
h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= +github.com/whyrusleeping/cbor-gen v0.0.0-20221220214510-0333c149dec0/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 h1:yVYDLoN2gmB3OdBXFW8e1UwgVbmCvNlnAKhvHPaNARI= +github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -2010,6 +2313,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -2024,6 +2328,7 @@ github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN github.com/zondax/ledger-go v0.14.1/go.mod 
h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -2043,60 +2348,71 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 h1:EbmAUG9hEAMXyfWEasIt2kmh/WmXUznUksChApTgBGc= +go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0/go.mod h1:rD9feqRYP24P14t5kmhNMqsqm1jvKmpx2H2rKVw52V8= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.34.0 h1:kpskzLZ60cJ48SJ4uxWa6waBL+4kSV6nVK8rP+QM8Wg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.34.0/go.mod h1:4+x3i62TEegDHuzNva0bMcAN8oUi5w4liGb1d/VgPYo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.34.0 h1:t4Ajxj8JGjxkqoBtbkCOY2cDUl9RwiNE9LPQavooi9U= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.34.0/go.mod h1:WO7omosl4P7JoanH9NgInxDxEn2F2M5YinIh8EyeT8w= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 h1:Us8tbCmuN16zAnK5TC69AtODLycKbwnskQzaB6DfFhc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2/go.mod h1:GZWSQQky8AgdJj50r1KJm8oiQiIPaAX7uZCFQX9GzC8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 h1:f6BwB2OACc3FCbYVznctQ9V6KK7Vq6CjmYXJ7DeSs4E= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0/go.mod h1:UqL5mZ3qs6XYhDnZaW1Ps4upD+PX6LipH40AoeuIlwU= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.39.0 h1:IZXpCEtI7BbX01DRQEWTGDkvjMB6hEhiEZXS+eg2YqY= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.39.0/go.mod h1:xY111jIZtWb+pUUgT4UiiSonAaY2cD2Ts5zvuKLki3o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= -go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= -go.opentelemetry.io/otel/sdk/metric v0.34.0 h1:7ElxfQpXCFZlRTvVRTkcUvK8Gt5DC8QzmzsLsO2gdzo= -go.opentelemetry.io/otel/sdk/metric v0.34.0/go.mod h1:l4r16BIqiqPy5rd14kkxllPy/fOI4tWo1jkpD9Z3ffQ= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= +go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= -go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= 
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= -go.uber.org/fx v1.19.3 h1:YqMRE4+2IepTYCMOvXqQpRa+QAVdiSTnsHU4XNWBceA= -go.uber.org/fx v1.19.3/go.mod h1:w2HrQg26ql9fLK7hlBiZ6JsRUKV+Lj/atT1KCjT8YhM= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= +go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -2108,8 +2424,11 @@ go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod 
h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2124,6 +2443,7 @@ golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2134,6 +2454,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2148,9 +2469,17 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0/go.mod 
h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2167,9 +2496,14 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2199,10 +2533,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 
h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2223,7 +2561,9 @@ golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2280,11 +2620,20 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2312,8 +2661,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2329,8 +2678,9 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2347,6 +2697,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2354,8 +2705,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2434,6 +2787,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2445,8 +2799,12 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2458,15 +2816,26 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2476,9 +2845,13 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2486,6 +2859,7 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2549,6 +2923,7 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -2556,10 +2931,13 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2572,6 +2950,8 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNq gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum 
v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -2627,8 +3007,8 @@ google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2752,8 +3132,12 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 h1:lv6/DhyiFFGsmzxbsUUTOkN29II+zeWHxvT8Lpdxsv0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2790,7 +3174,6 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= @@ -2800,8 +3183,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2818,12 +3201,13 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -2836,6 +3220,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/npipe.v2 
v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= @@ -2857,6 +3242,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2872,12 +3258,14 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= pgregory.net/rapid v0.5.3 h1:163N50IHFqr1phZens4FQOdPgfJscR7a562mjQqeo4M= +pgregory.net/rapid v0.5.3/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/header/header.go b/header/header.go index 25c3b0e5cf..92f8538696 100644 --- a/header/header.go +++ b/header/header.go @@ -2,14 +2,15 @@ package header import ( "bytes" - "context" "encoding/json" "fmt" "time" tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/light" core "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/rsmt2d" @@ -17,7 +18,6 @@ import ( // ConstructFn aliases a function that creates an ExtendedHeader. 
type ConstructFn = func( - context.Context, *core.Header, *core.Commit, *core.ValidatorSet, @@ -44,42 +44,25 @@ type ExtendedHeader struct { DAH *DataAvailabilityHeader `json:"dah"` } -func (eh *ExtendedHeader) New() libhead.Header { - return new(ExtendedHeader) -} - -func (eh *ExtendedHeader) IsZero() bool { - return eh == nil -} - -func (eh *ExtendedHeader) ChainID() string { - return eh.RawHeader.ChainID -} - -func (eh *ExtendedHeader) Height() int64 { - return eh.RawHeader.Height -} - -func (eh *ExtendedHeader) Time() time.Time { - return eh.RawHeader.Time -} - -var _ libhead.Header = &ExtendedHeader{} - // MakeExtendedHeader assembles new ExtendedHeader. func MakeExtendedHeader( - _ context.Context, h *core.Header, comm *core.Commit, vals *core.ValidatorSet, eds *rsmt2d.ExtendedDataSquare, ) (*ExtendedHeader, error) { - var dah DataAvailabilityHeader + var ( + dah DataAvailabilityHeader + err error + ) switch eds { case nil: dah = EmptyDAH() default: - dah = da.NewDataAvailabilityHeader(eds) + dah, err = da.NewDataAvailabilityHeader(eds) + } + if err != nil { + return nil, err } eh := &ExtendedHeader{ @@ -88,14 +71,34 @@ func MakeExtendedHeader( Commit: comm, ValidatorSet: vals, } - return eh, eh.Validate() + return eh, nil +} + +func (eh *ExtendedHeader) New() *ExtendedHeader { + return new(ExtendedHeader) +} + +func (eh *ExtendedHeader) IsZero() bool { + return eh == nil +} + +func (eh *ExtendedHeader) ChainID() string { + return eh.RawHeader.ChainID +} + +func (eh *ExtendedHeader) Height() uint64 { + return uint64(eh.RawHeader.Height) +} + +func (eh *ExtendedHeader) Time() time.Time { + return eh.RawHeader.Time } // Hash returns Hash of the wrapped RawHeader. // NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without // recomputing. func (eh *ExtendedHeader) Hash() libhead.Hash { - return libhead.Hash(eh.Commit.BlockID.Hash) + return eh.Commit.BlockID.Hash.Bytes() } // LastHeader returns the Hash of the last wrapped RawHeader. @@ -115,6 +118,11 @@ func (eh *ExtendedHeader) Validate() error { return fmt.Errorf("ValidateBasic error on RawHeader at height %d: %w", eh.Height(), err) } + if eh.RawHeader.Version.App != appconsts.LatestVersion { + return fmt.Errorf("app version mismatch, expected: %d, got %d", appconsts.LatestVersion, + eh.RawHeader.Version.App) + } + err = eh.Commit.ValidateBasic() if err != nil { return fmt.Errorf("ValidateBasic error on Commit at height %d: %w", eh.Height(), err) @@ -134,8 +142,8 @@ func (eh *ExtendedHeader) Validate() error { // ensure data root from raw header matches computed root if !bytes.Equal(eh.DAH.Hash(), eh.DataHash) { - panic(fmt.Sprintf("mismatch between data hash commitment from core header and computed data root "+ - "at height %d: data hash: %X, computed root: %X", eh.Height(), eh.DataHash, eh.DAH.Hash())) + return fmt.Errorf("mismatch between data hash commitment from core header and computed data root "+ + "at height %d: data hash: %X, computed root: %X", eh.Height(), eh.DataHash, eh.DAH.Hash()) } // Make sure the header is consistent with the commit. 
@@ -146,7 +154,8 @@ func (eh *ExtendedHeader) Validate() error { return fmt.Errorf("commit signs block %X, header is block %X", chash, hhash) } - if err := eh.ValidatorSet.VerifyCommitLight(eh.ChainID(), eh.Commit.BlockID, eh.Height(), eh.Commit); err != nil { + err = eh.ValidatorSet.VerifyCommitLight(eh.ChainID(), eh.Commit.BlockID, int64(eh.Height()), eh.Commit) + if err != nil { return fmt.Errorf("VerifyCommitLight error at height %d: %w", eh.Height(), err) } @@ -157,6 +166,42 @@ func (eh *ExtendedHeader) Validate() error { return nil } +// Verify validates given untrusted Header against trusted ExtendedHeader. +func (eh *ExtendedHeader) Verify(untrst *ExtendedHeader) error { + isAdjacent := eh.Height()+1 == untrst.Height() + if isAdjacent { + // Optimized verification for adjacent headers + // Check the validator hashes are the same + if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", + eh.NextValidatorsHash, + untrst.ValidatorsHash, + ), + } + } + + if !bytes.Equal(untrst.LastHeader(), eh.Hash()) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("expected new header to point to last header hash (%X), but got %X)", + eh.Hash(), + untrst.LastHeader(), + ), + } + } + + return nil + } + + if err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID(), untrst.Commit, light.DefaultTrustLevel); err != nil { + return &libhead.VerifyError{ + Reason: err, + SoftFailure: true, + } + } + return nil +} + // MarshalBinary marshals ExtendedHeader to binary. func (eh *ExtendedHeader) MarshalBinary() ([]byte, error) { return MarshalExtendedHeader(eh) @@ -228,3 +273,5 @@ func (eh *ExtendedHeader) UnmarshalJSON(data []byte) error { eh.RawHeader = *rawHeader return nil } + +var _ libhead.Header[*ExtendedHeader] = &ExtendedHeader{} diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 3e0da71d69..65ae8c950f 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -26,7 +26,8 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" ) var log = logging.Logger("headertest") @@ -157,9 +158,9 @@ func (s *TestSuite) NextHeader() *header.ExtendedHeader { } func (s *TestSuite) GenRawHeader( - height int64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { + height uint64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { rh := RandRawHeader(s.t) - rh.Height = height + rh.Height = int64(height) rh.Time = time.Now() rh.LastBlockID = types.BlockID{Hash: bytes.HexBytes(lastHeader)} rh.LastCommitHash = bytes.HexBytes(lastCommit) @@ -298,7 +299,7 @@ func RandBlockID(*testing.T) types.BlockID { // FraudMaker creates a custom ConstructFn that breaks the block at the given height. 
func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService) header.ConstructFn { log.Warn("Corrupting block...", "height", faultHeight) - return func(ctx context.Context, + return func( h *types.Header, comm *types.Commit, vals *types.ValidatorSet, @@ -317,14 +318,15 @@ func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService } return eh, nil } - return header.MakeExtendedHeader(ctx, h, comm, vals, eds) + return header.MakeExtendedHeader(h, comm, vals, eds) } } func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { valSet, vals := RandValidatorSet(10, 10) gen := RandRawHeader(t) - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) gen.DataHash = dah.Hash() gen.ValidatorsHash = valSet.Hash() @@ -349,17 +351,16 @@ func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedData func CreateFraudExtHeader( t *testing.T, eh *header.ExtendedHeader, - dag blockservice.BlockService, + serv blockservice.BlockService, ) (*header.ExtendedHeader, *rsmt2d.ExtendedDataSquare) { - extended := share.RandEDS(t, 2) - shares := share.ExtractEDS(extended) - copy(shares[0][share.NamespaceSize:], shares[1][share.NamespaceSize:]) - extended, err := share.ImportShares(context.Background(), shares, dag) + square := edstest.RandByzantineEDS(t, len(eh.DAH.RowRoots)) + err := ipld.ImportEDS(context.Background(), square, serv) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(square) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(extended) eh.DAH = &dah eh.RawHeader.DataHash = dah.Hash() - return eh, extended + return eh, square } type Subscriber struct { diff --git a/header/headertest/verify_test.go b/header/headertest/verify_test.go index 78da779a54..7ef16afc8d 100644 --- a/header/headertest/verify_test.go +++ b/header/headertest/verify_test.go @@ -3,33 +3,32 @@ package headertest import ( "strconv" "testing" - "time" "github.com/stretchr/testify/assert" tmrand "github.com/tendermint/tendermint/libs/rand" - libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/celestia-node/header" ) func TestVerify(t *testing.T) { h := NewTestSuite(t, 2).GenExtendedHeaders(3) trusted, untrustedAdj, untrustedNonAdj := h[0], h[1], h[2] tests := []struct { - prepare func() libhead.Header + prepare func() *header.ExtendedHeader err bool }{ { - prepare: func() libhead.Header { return untrustedAdj }, + prepare: func() *header.ExtendedHeader { return untrustedAdj }, err: false, }, { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { return untrustedNonAdj }, err: false, }, { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { untrusted := *untrustedAdj untrusted.ValidatorsHash = tmrand.Bytes(32) return &untrusted @@ -37,15 +36,7 @@ func TestVerify(t *testing.T) { err: true, }, { - prepare: func() libhead.Header { - untrusted := *untrustedNonAdj - untrusted.Commit = NewTestSuite(t, 2).Commit(RandRawHeader(t)) - return &untrusted - }, - err: true, - }, - { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { untrusted := *untrustedAdj untrusted.RawHeader.LastBlockID.Hash = tmrand.Bytes(32) return &untrusted @@ -53,30 +44,10 @@ func TestVerify(t *testing.T) { err: true, }, { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Time = untrustedAdj.RawHeader.Time.Add(time.Minute) - return untrustedAdj - }, - err: true, - }, - { - 
prepare: func() libhead.Header { - untrustedAdj.RawHeader.Time = untrustedAdj.RawHeader.Time.Truncate(time.Hour) - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.ChainID = "toaster" - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Height++ - return untrustedAdj + prepare: func() *header.ExtendedHeader { + untrusted := *untrustedNonAdj + untrusted.Commit = NewTestSuite(t, 2).Commit(RandRawHeader(t)) + return &untrusted }, err: true, }, diff --git a/header/serde.go b/header/serde.go index f4763e3b3b..a511a1352b 100644 --- a/header/serde.go +++ b/header/serde.go @@ -61,7 +61,7 @@ func UnmarshalExtendedHeader(data []byte) (*ExtendedHeader, error) { return nil, err } - return out, out.Validate() + return out, nil } func ExtendedHeaderToProto(eh *ExtendedHeader) (*header_pb.ExtendedHeader, error) { diff --git a/header/verify.go b/header/verify.go deleted file mode 100644 index 18e7f91ea6..0000000000 --- a/header/verify.go +++ /dev/null @@ -1,84 +0,0 @@ -package header - -import ( - "bytes" - "fmt" - "time" - - "github.com/tendermint/tendermint/light" - - libhead "github.com/celestiaorg/go-header" -) - -// Verify validates given untrusted Header against trusted ExtendedHeader. -func (eh *ExtendedHeader) Verify(untrusted libhead.Header) error { - untrst, ok := untrusted.(*ExtendedHeader) - if !ok { - // if the header of the type was given, something very wrong happens - panic(fmt.Sprintf("invalid header type: expected %T, got %T", eh, untrusted)) - } - - if err := eh.verify(untrst); err != nil { - return &libhead.VerifyError{Reason: err} - } - - isAdjacent := eh.Height()+1 == untrst.Height() - if isAdjacent { - // Optimized verification for adjacent headers - // Check the validator hashes are the same - if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { - return &libhead.VerifyError{ - Reason: fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", - eh.NextValidatorsHash, - untrst.ValidatorsHash, - ), - } - } - - if !bytes.Equal(untrst.LastHeader(), eh.Hash()) { - return &libhead.VerifyError{ - Reason: fmt.Errorf("expected new header to point to last header hash (%X), but got %X)", - eh.Hash(), - untrst.LastHeader(), - ), - } - } - - return nil - } - - // Ensure that untrusted commit has enough of trusted commit's power. - err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID(), untrst.Commit, light.DefaultTrustLevel) - if err != nil { - return &libhead.VerifyError{Reason: err} - } - - return nil -} - -// clockDrift defines how much new header's time can drift into -// the future relative to the now time during verification. -var clockDrift = 10 * time.Second - -// verify performs basic verification of untrusted header. 
-func (eh *ExtendedHeader) verify(untrst libhead.Header) error { - if untrst.Height() <= eh.Height() { - return fmt.Errorf("untrusted header height(%d) <= current trusted header(%d)", untrst.Height(), eh.Height()) - } - - if untrst.ChainID() != eh.ChainID() { - return fmt.Errorf("untrusted header has different chain %s, not %s", untrst.ChainID(), eh.ChainID()) - } - - if !untrst.Time().After(eh.Time()) { - return fmt.Errorf("untrusted header time(%v) must be after current trusted header(%v)", untrst.Time(), eh.Time()) - } - - now := time.Now() - if !untrst.Time().Before(now.Add(clockDrift)) { - return fmt.Errorf( - "new untrusted header has a time from the future %v (now: %v, clockDrift: %v)", untrst.Time(), now, clockDrift) - } - - return nil -} diff --git a/libs/authtoken/authtoken.go b/libs/authtoken/authtoken.go index c86272790d..3d6645c972 100644 --- a/libs/authtoken/authtoken.go +++ b/libs/authtoken/authtoken.go @@ -1,12 +1,30 @@ package authtoken import ( + "encoding/json" + "github.com/cristalhq/jwt" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/celestiaorg/celestia-node/api/rpc/perms" ) +// ExtractSignedPermissions returns the permissions granted to the token by the passed signer. +// If the token isn't signed by the signer, it will not pass verification. +func ExtractSignedPermissions(signer jwt.Signer, token string) ([]auth.Permission, error) { + tk, err := jwt.ParseAndVerifyString(token, signer) + if err != nil { + return nil, err + } + p := new(perms.JWTPayload) + err = json.Unmarshal(tk.RawClaims(), p) + if err != nil { + return nil, err + } + return p.Allow, nil +} + +// NewSignedJWT returns a signed JWT token with the passed permissions and signer. func NewSignedJWT(signer jwt.Signer, permissions []auth.Permission) (string, error) { token, err := jwt.NewTokenBuilder(signer).Build(&perms.JWTPayload{ Allow: permissions, diff --git a/libs/edssser/edssser.go b/libs/edssser/edssser.go new file mode 100644 index 0000000000..fd11b47fcf --- /dev/null +++ b/libs/edssser/edssser.go @@ -0,0 +1,172 @@ +package edssser + +import ( + "context" + "errors" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/ipfs/go-datastore" + + "github.com/celestiaorg/celestia-app/pkg/da" + + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +type Config struct { + EDSSize int + EDSWrites int + EnableLog bool + LogFilePath string + StatLogFreq int + OpTimeout time.Duration +} + +// EDSsser stand for EDS Store Stresser. 
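+// It repeatedly writes randomly generated EDSes of the configured size into an
+// eds.Store and records per-write timing statistics.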
+type EDSsser struct { + config Config + datastore datastore.Batching + edsstoreMu sync.Mutex + edsstore *eds.Store + + statsFileMu sync.Mutex + statsFile *os.File +} + +func NewEDSsser(path string, datastore datastore.Batching, cfg Config) (*EDSsser, error) { + edsstore, err := eds.NewStore(path, datastore) + if err != nil { + return nil, err + } + + return &EDSsser{ + config: cfg, + datastore: datastore, + edsstore: edsstore, + }, nil +} + +func (ss *EDSsser) Run(ctx context.Context) (stats Stats, err error) { + ss.edsstoreMu.Lock() + defer ss.edsstoreMu.Unlock() + + err = ss.edsstore.Start(ctx) + if err != nil { + return stats, err + } + defer func() { + err = errors.Join(err, ss.edsstore.Stop(ctx)) + }() + + edsHashes, err := ss.edsstore.List() + if err != nil { + return stats, err + } + fmt.Printf("recovered %d EDSes\n\n", len(edsHashes)) + + t := &testing.T{} + for toWrite := ss.config.EDSWrites - len(edsHashes); ctx.Err() == nil && toWrite > 0; toWrite-- { + took, err := ss.put(ctx, t) + + stats.TotalWritten++ + stats.TotalTime += took + if took < stats.MinTime || stats.MinTime == 0 { + stats.MinTime = took + } else if took > stats.MaxTime { + stats.MaxTime = took + } + + if ss.config.EnableLog { + if stats.TotalWritten%ss.config.StatLogFreq == 0 { + stats := stats.Finalize() + fmt.Println(stats) + go func() { + err := ss.dumpStat(stats) + if err != nil { + fmt.Printf("error dumping stats: %s\n", err.Error()) + } + }() + } + if err != nil { + fmt.Printf("ERROR put: %s, took: %v, at: %v\n", err.Error(), took, time.Now()) + continue + } + if took > ss.config.OpTimeout/2 { + fmt.Println("long put", "size", ss.config.EDSSize, "took", took, "at", time.Now()) + continue + } + + fmt.Println("square written", "size", ss.config.EDSSize, "took", took, "at", time.Now()) + } + } + return stats, nil +} + +func (ss *EDSsser) dumpStat(stats Stats) (err error) { + ss.statsFileMu.Lock() + defer ss.statsFileMu.Unlock() + + ss.statsFile, err = os.Create(ss.config.LogFilePath + "/edssser_stats.txt") + if err != nil { + return err + } + + _, err = ss.statsFile.Write([]byte(stats.String())) + if err != nil { + return err + } + + return ss.statsFile.Close() +} + +type Stats struct { + TotalWritten int + TotalTime, MinTime, MaxTime, AvgTime time.Duration + // Deviation ? 
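+	// AvgTime is only populated by Finalize as TotalTime divided by TotalWritten.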
+} + +func (stats Stats) Finalize() Stats { + if stats.TotalTime != 0 { + stats.AvgTime = stats.TotalTime / time.Duration(stats.TotalWritten) + } + return stats +} + +func (stats Stats) String() string { + return fmt.Sprintf(` +TotalWritten %d +TotalWritingTime %v +MaxTime %s +MinTime %s +AvgTime %s +`, + stats.TotalWritten, + stats.TotalTime, + stats.MaxTime, + stats.MinTime, + stats.AvgTime, + ) +} + +func (ss *EDSsser) put(ctx context.Context, t *testing.T) (time.Duration, error) { + ctx, cancel := context.WithTimeout(ctx, ss.config.OpTimeout) + if ss.config.OpTimeout == 0 { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + // divide by 2 to get ODS size as expected by RandEDS + square := edstest.RandEDS(t, ss.config.EDSSize/2) + dah, err := da.NewDataAvailabilityHeader(square) + if err != nil { + return 0, err + } + + now := time.Now() + err = ss.edsstore.Put(ctx, dah.Hash(), square) + return time.Since(now), err +} diff --git a/libs/pidstore/pidstore.go b/libs/pidstore/pidstore.go new file mode 100644 index 0000000000..2d4eb870a8 --- /dev/null +++ b/libs/pidstore/pidstore.go @@ -0,0 +1,67 @@ +package pidstore + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" +) + +var ( + storePrefix = datastore.NewKey("pidstore") + peersKey = datastore.NewKey("peers") + + log = logging.Logger("pidstore") +) + +// PeerIDStore is used to store/load peers to/from disk. +type PeerIDStore struct { + ds datastore.Datastore +} + +// NewPeerIDStore creates a new peer ID store backed by the given datastore. +func NewPeerIDStore(ds datastore.Datastore) *PeerIDStore { + return &PeerIDStore{ + ds: namespace.Wrap(ds, storePrefix), + } +} + +// Load loads the peers from datastore and returns them. +func (p *PeerIDStore) Load(ctx context.Context) ([]peer.ID, error) { + log.Debug("Loading peers") + + bin, err := p.ds.Get(ctx, peersKey) + if err != nil { + return nil, fmt.Errorf("pidstore: loading peers from datastore: %w", err) + } + + var peers []peer.ID + err = json.Unmarshal(bin, &peers) + if err != nil { + return nil, fmt.Errorf("pidstore: unmarshalling peer IDs: %w", err) + } + + log.Infow("Loaded peers from disk", "amount", len(peers)) + return peers, nil +} + +// Put persists the given peer IDs to the datastore. 
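+// Any peer list previously persisted under the same key is overwritten.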
+func (p *PeerIDStore) Put(ctx context.Context, peers []peer.ID) error { + log.Debugw("Persisting peers to disk", "amount", len(peers)) + + bin, err := json.Marshal(peers) + if err != nil { + return fmt.Errorf("pidstore: marshal peerlist: %w", err) + } + + if err = p.ds.Put(ctx, peersKey, bin); err != nil { + return fmt.Errorf("pidstore: error writing to datastore: %w", err) + } + + log.Infow("Persisted peers successfully", "amount", len(peers)) + return nil +} diff --git a/libs/pidstore/pidstore_test.go b/libs/pidstore/pidstore_test.go new file mode 100644 index 0000000000..eafceff3fe --- /dev/null +++ b/libs/pidstore/pidstore_test.go @@ -0,0 +1,59 @@ +package pidstore + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPutLoad(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer t.Cleanup(cancel) + + peerstore := NewPeerIDStore(sync.MutexWrap(datastore.NewMapDatastore())) + + ids, err := generateRandomPeerList(10) + require.NoError(t, err) + + err = peerstore.Put(ctx, ids) + require.NoError(t, err) + + retrievedPeerlist, err := peerstore.Load(ctx) + require.NoError(t, err) + + assert.Equal(t, len(ids), len(retrievedPeerlist)) + assert.Equal(t, ids, retrievedPeerlist) +} + +func generateRandomPeerList(length int) ([]peer.ID, error) { + peerlist := make([]peer.ID, length) + for i := range peerlist { + key, err := rsa.GenerateKey(rand.Reader, 2096) + if err != nil { + return nil, err + } + + _, pubkey, err := crypto.KeyPairFromStdKey(key) + if err != nil { + return nil, err + } + + peerID, err := peer.IDFromPublicKey(pubkey) + if err != nil { + return nil, err + } + + peerlist[i] = peerID + } + + return peerlist, nil +} diff --git a/libs/utils/address.go b/libs/utils/address.go index a8170e44b9..ae52a03b16 100644 --- a/libs/utils/address.go +++ b/libs/utils/address.go @@ -29,15 +29,14 @@ func ValidateAddr(addr string) (string, error) { return addr, err } - if ip := net.ParseIP(addr); ip == nil { - addrs, err := net.LookupHost(addr) - if err != nil { - return addr, fmt.Errorf("could not resolve %v: %w", addr, err) - } - if len(addrs) == 0 { - return addr, fmt.Errorf("no IP addresses found for DNS record: %v", addr) - } - addr = addrs[0] + ip := net.ParseIP(addr) + if ip != nil { + return addr, nil } - return addr, nil + + resolved, err := net.ResolveIPAddr("ip4", addr) + if err != nil { + return addr, err + } + return resolved.String(), nil } diff --git a/logs/logs.go b/logs/logs.go index 15c26888c5..23d0683996 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -11,6 +11,7 @@ func SetAllLoggers(level logging.LogLevel) { _ = logging.SetLogLevel("dht", "ERROR") _ = logging.SetLogLevel("swarm2", "WARN") _ = logging.SetLogLevel("bitswap", "WARN") + _ = logging.SetLogLevel("bitswap-client", "WARN") _ = logging.SetLogLevel("connmgr", "WARN") _ = logging.SetLogLevel("nat", "INFO") _ = logging.SetLogLevel("dht/RtRefreshManager", "FATAL") diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index 7cbe312856..aae502824c 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -3,9 +3,8 @@ package blob import ( "context" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" ) var _ 
Module = (*API)(nil) @@ -19,23 +18,23 @@ type Module interface { // Uses default wallet registered on the Node. Submit(_ context.Context, _ []*blob.Blob) (height uint64, _ error) // Get retrieves the blob by commitment under the given namespace and height. - Get(_ context.Context, height uint64, _ namespace.ID, _ blob.Commitment) (*blob.Blob, error) + Get(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Blob, error) // GetAll returns all blobs under the given namespaces and height. - GetAll(_ context.Context, height uint64, _ []namespace.ID) ([]*blob.Blob, error) + GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) // GetProof retrieves proofs in the given namespaces at the given height by commitment. - GetProof(_ context.Context, height uint64, _ namespace.ID, _ blob.Commitment) (*blob.Proof, error) + GetProof(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Proof, error) // Included checks whether a blob's given commitment(Merkle subtree root) is included at // given height and under the namespace. - Included(_ context.Context, height uint64, _ namespace.ID, _ *blob.Proof, _ blob.Commitment) (bool, error) + Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) } type API struct { Internal struct { - Submit func(context.Context, []*blob.Blob) (uint64, error) `perm:"write"` - Get func(context.Context, uint64, namespace.ID, blob.Commitment) (*blob.Blob, error) `perm:"read"` - GetAll func(context.Context, uint64, []namespace.ID) ([]*blob.Blob, error) `perm:"read"` - GetProof func(context.Context, uint64, namespace.ID, blob.Commitment) (*blob.Proof, error) `perm:"read"` - Included func(context.Context, uint64, namespace.ID, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + Submit func(context.Context, []*blob.Blob) (uint64, error) `perm:"write"` + Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` + GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` + GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` + Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` } } @@ -46,31 +45,31 @@ func (api *API) Submit(ctx context.Context, blobs []*blob.Blob) (uint64, error) func (api *API) Get( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, commitment blob.Commitment, ) (*blob.Blob, error) { - return api.Internal.Get(ctx, height, nID, commitment) + return api.Internal.Get(ctx, height, namespace, commitment) } -func (api *API) GetAll(ctx context.Context, height uint64, nIDs []namespace.ID) ([]*blob.Blob, error) { - return api.Internal.GetAll(ctx, height, nIDs) +func (api *API) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { + return api.Internal.GetAll(ctx, height, namespaces) } func (api *API) GetProof( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, commitment blob.Commitment, ) (*blob.Proof, error) { - return api.Internal.GetProof(ctx, height, nID, commitment) + return api.Internal.GetProof(ctx, height, namespace, commitment) } func (api *API) Included( ctx context.Context, height uint64, - nID namespace.ID, + namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment, ) (bool, error) { - return 
api.Internal.Included(ctx, height, nID, proof, commitment) + return api.Internal.Included(ctx, height, namespace, proof, commitment) } diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go index f99d1d8168..5cd34b74b6 100644 --- a/nodebuilder/blob/mocks/api.go +++ b/nodebuilder/blob/mocks/api.go @@ -8,9 +8,10 @@ import ( context "context" reflect "reflect" - blob "github.com/celestiaorg/celestia-node/blob" - namespace "github.com/celestiaorg/nmt/namespace" gomock "github.com/golang/mock/gomock" + + blob "github.com/celestiaorg/celestia-node/blob" + share "github.com/celestiaorg/celestia-node/share" ) // MockModule is a mock of Module interface. @@ -37,7 +38,7 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // Get mocks base method. -func (m *MockModule) Get(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 blob.Commitment) (*blob.Blob, error) { +func (m *MockModule) Get(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Blob, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*blob.Blob) @@ -52,7 +53,7 @@ func (mr *MockModuleMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomoc } // GetAll mocks base method. -func (m *MockModule) GetAll(arg0 context.Context, arg1 uint64, arg2 []namespace.ID) ([]*blob.Blob, error) { +func (m *MockModule) GetAll(arg0 context.Context, arg1 uint64, arg2 []share.Namespace) ([]*blob.Blob, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetAll", arg0, arg1, arg2) ret0, _ := ret[0].([]*blob.Blob) @@ -67,7 +68,7 @@ func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.C } // GetProof mocks base method. -func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 blob.Commitment) (*blob.Proof, error) { +func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Proof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetProof", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*blob.Proof) @@ -82,7 +83,7 @@ func (mr *MockModuleMockRecorder) GetProof(arg0, arg1, arg2, arg3 interface{}) * } // Included mocks base method. 
-func (m *MockModule) Included(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 *blob.Proof, arg4 blob.Commitment) (bool, error) { +func (m *MockModule) Included(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 *blob.Proof, arg4 blob.Commitment) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Included", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(bool) diff --git a/nodebuilder/config.go b/nodebuilder/config.go index 670bbf9bbd..41f24d6d3d 100644 --- a/nodebuilder/config.go +++ b/nodebuilder/config.go @@ -114,7 +114,7 @@ func removeConfig(path string) error { func UpdateConfig(tp node.Type, path string) (err error) { path, err = storePath(path) if err != nil { - return + return err } flock, err := fslock.Lock(lockPath(path)) @@ -122,7 +122,7 @@ func UpdateConfig(tp node.Type, path string) (err error) { if err == fslock.ErrLocked { err = ErrOpened } - return + return err } defer flock.Unlock() //nolint: errcheck @@ -131,18 +131,18 @@ func UpdateConfig(tp node.Type, path string) (err error) { cfgPath := configPath(path) cfg, err := LoadConfig(cfgPath) if err != nil { - return + return err } cfg, err = updateConfig(cfg, newCfg) if err != nil { - return + return err } // save the updated config err = removeConfig(cfgPath) if err != nil { - return + return err } return SaveConfig(cfgPath, cfg) } diff --git a/nodebuilder/core/config.go b/nodebuilder/core/config.go index c0261c7c86..4affcd3087 100644 --- a/nodebuilder/core/config.go +++ b/nodebuilder/core/config.go @@ -18,14 +18,18 @@ type Config struct { // node's connection to a Celestia-Core endpoint. func DefaultConfig() Config { return Config{ - IP: "0.0.0.0", - RPCPort: "0", - GRPCPort: "0", + IP: "", + RPCPort: "26657", + GRPCPort: "9090", } } // Validate performs basic validation of the config. func (cfg *Config) Validate() error { + if !cfg.IsEndpointConfigured() { + return nil + } + ip, err := utils.ValidateAddr(cfg.IP) if err != nil { return err @@ -41,3 +45,9 @@ func (cfg *Config) Validate() error { } return nil } + +// IsEndpointConfigured returns whether a core endpoint has been set +// on the config (true if set). +func (cfg *Config) IsEndpointConfigured() bool { + return cfg.IP != "" +} diff --git a/nodebuilder/das/constructors.go b/nodebuilder/das/constructors.go index 18f6962f40..7c6b5bed4f 100644 --- a/nodebuilder/das/constructors.go +++ b/nodebuilder/das/constructors.go @@ -42,16 +42,16 @@ func newDASer( hsub libhead.Subscriber[*header.ExtendedHeader], store libhead.Store[*header.ExtendedHeader], batching datastore.Batching, - fraudServ fraud.Service, + fraudServ fraud.Service[*header.ExtendedHeader], bFn shrexsub.BroadcastFn, options ...das.Option, -) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer], error) { +) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader], error) { ds, err := das.NewDASer(da, hsub, store, batching, fraudServ, bFn, options...) 
if err != nil { return nil, nil, err } - return ds, &modfraud.ServiceBreaker[*das.DASer]{ + return ds, &modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]{ Service: ds, FraudServ: fraudServ, FraudType: byzantine.BadEncoding, diff --git a/nodebuilder/das/mocks/api.go b/nodebuilder/das/mocks/api.go index c4046e90e8..68ffaf3c8c 100644 --- a/nodebuilder/das/mocks/api.go +++ b/nodebuilder/das/mocks/api.go @@ -8,8 +8,9 @@ import ( context "context" reflect "reflect" - das "github.com/celestiaorg/celestia-node/das" gomock "github.com/golang/mock/gomock" + + das "github.com/celestiaorg/celestia-node/das" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/das/module.go b/nodebuilder/das/module.go index 61c935fd40..d9f7e700e2 100644 --- a/nodebuilder/das/module.go +++ b/nodebuilder/das/module.go @@ -6,6 +6,7 @@ import ( "go.uber.org/fx" "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/header" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/node" ) @@ -41,10 +42,10 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { baseComponents, fx.Provide(fx.Annotate( newDASer, - fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer]) error { + fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { return breaker.Start(ctx) }), - fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer]) error { + fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { return breaker.Stop(ctx) }), )), diff --git a/nodebuilder/fraud/constructors.go b/nodebuilder/fraud/constructors.go index a70ee3e3d4..eee85d4139 100644 --- a/nodebuilder/fraud/constructors.go +++ b/nodebuilder/fraud/constructors.go @@ -1,8 +1,6 @@ package fraud import ( - "context" - "github.com/ipfs/go-datastore" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" @@ -16,32 +14,46 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) -func newFraudService(syncerEnabled bool) func( - fx.Lifecycle, - *pubsub.PubSub, - host.Host, - libhead.Store[*header.ExtendedHeader], - datastore.Batching, - p2p.Network, -) (Module, fraud.Service, error) { - return func( - lc fx.Lifecycle, - sub *pubsub.PubSub, - host host.Host, - hstore libhead.Store[*header.ExtendedHeader], - ds datastore.Batching, - network p2p.Network, - ) (Module, fraud.Service, error) { - getter := func(ctx context.Context, height uint64) (libhead.Header, error) { - return hstore.GetByHeight(ctx, height) - } - pservice := fraudserv.NewProofService(sub, host, getter, ds, syncerEnabled, network.String()) - lc.Append(fx.Hook{ - OnStart: pservice.Start, - OnStop: pservice.Stop, - }) - return &Service{ - Service: pservice, - }, pservice, nil - } +func fraudUnmarshaler() fraud.ProofUnmarshaler[*header.ExtendedHeader] { + return defaultProofUnmarshaler +} + +func newFraudServiceWithSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := true + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: 
pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil +} + +func newFraudServiceWithoutSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := false + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil } diff --git a/nodebuilder/fraud/fraud.go b/nodebuilder/fraud/fraud.go index 8d10d34e88..45c3863d6f 100644 --- a/nodebuilder/fraud/fraud.go +++ b/nodebuilder/fraud/fraud.go @@ -2,8 +2,12 @@ package fraud import ( "context" + "encoding/json" + "errors" "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" ) var _ Module = (*API)(nil) @@ -35,3 +39,83 @@ func (api *API) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-cha func (api *API) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { return api.Internal.Get(ctx, proofType) } + +var _ Module = (*module)(nil) + +// module is an implementation of Module that uses fraud.module as a backend. It is used to +// provide fraud proofs as a non-interface type to the API, and wrap fraud.Subscriber with a +// channel of Proofs. +type module struct { + fraud.Service[*header.ExtendedHeader] +} + +func (s *module) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan Proof, error) { + subscription, err := s.Service.Subscribe(proofType) + if err != nil { + return nil, err + } + proofs := make(chan Proof) + go func() { + defer close(proofs) + defer subscription.Cancel() + for { + proof, err := subscription.Proof(ctx) + if err != nil { + if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + log.Errorw("fetching proof from subscription", "err", err) + } + return + } + select { + case <-ctx.Done(): + return + case proofs <- Proof{Proof: proof}: + } + } + }() + return proofs, nil +} + +func (s *module) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { + originalProofs, err := s.Service.Get(ctx, proofType) + if err != nil { + return nil, err + } + proofs := make([]Proof, len(originalProofs)) + for i, originalProof := range originalProofs { + proofs[i].Proof = originalProof + } + return proofs, nil +} + +// Proof embeds the fraud.Proof interface type to provide a concrete type for JSON serialization. 
+type Proof struct { + fraud.Proof[*header.ExtendedHeader] +} + +type fraudProofJSON struct { + ProofType fraud.ProofType `json:"proof_type"` + Data []byte `json:"data"` +} + +func (f *Proof) UnmarshalJSON(data []byte) error { + var fp fraudProofJSON + err := json.Unmarshal(data, &fp) + if err != nil { + return err + } + f.Proof, err = defaultProofUnmarshaler.Unmarshal(fp.ProofType, fp.Data) + return err +} + +func (f *Proof) MarshalJSON() ([]byte, error) { + marshaledProof, err := f.MarshalBinary() + if err != nil { + return nil, err + } + fraudProof := &fraudProofJSON{ + ProofType: f.Type(), + Data: marshaledProof, + } + return json.Marshal(fraudProof) +} diff --git a/nodebuilder/fraud/lifecycle.go b/nodebuilder/fraud/lifecycle.go index 24ed402f5d..1a6702aafa 100644 --- a/nodebuilder/fraud/lifecycle.go +++ b/nodebuilder/fraud/lifecycle.go @@ -2,11 +2,13 @@ package fraud import ( "context" + "errors" "fmt" "github.com/ipfs/go-datastore" "github.com/celestiaorg/go-fraud" + libhead "github.com/celestiaorg/go-header" ) // service defines minimal interface with service lifecycle methods @@ -18,26 +20,30 @@ type service interface { // ServiceBreaker wraps any service with fraud proof subscription of a specific type. // If proof happens the service is Stopped automatically. // TODO(@Wondertan): Support multiple fraud types. -type ServiceBreaker[S service] struct { +type ServiceBreaker[S service, H libhead.Header[H]] struct { Service S FraudType fraud.ProofType - FraudServ fraud.Service + FraudServ fraud.Service[H] ctx context.Context cancel context.CancelFunc - sub fraud.Subscription + sub fraud.Subscription[H] } // Start starts the inner service if there are no fraud proofs stored. // Subscribes for fraud and stops the service whenever necessary. -func (breaker *ServiceBreaker[S]) Start(ctx context.Context) error { +func (breaker *ServiceBreaker[S, H]) Start(ctx context.Context) error { + if breaker == nil { + return nil + } + proofs, err := breaker.FraudServ.Get(ctx, breaker.FraudType) - switch err { + switch { default: return fmt.Errorf("getting proof(%s): %w", breaker.FraudType, err) - case nil: - return &fraud.ErrFraudExists{Proof: proofs} - case datastore.ErrNotFound: + case err == nil: + return &fraud.ErrFraudExists[H]{Proof: proofs} + case errors.Is(err, datastore.ErrNotFound): } err = breaker.Service.Start(ctx) @@ -56,7 +62,11 @@ func (breaker *ServiceBreaker[S]) Start(ctx context.Context) error { } // Stop stops the service and cancels subscription. 
-func (breaker *ServiceBreaker[S]) Stop(ctx context.Context) error { +func (breaker *ServiceBreaker[S, H]) Stop(ctx context.Context) error { + if breaker == nil { + return nil + } + if breaker.ctx.Err() != nil { // short circuit if the service was already stopped return nil @@ -67,13 +77,13 @@ func (breaker *ServiceBreaker[S]) Stop(ctx context.Context) error { return breaker.Service.Stop(ctx) } -func (breaker *ServiceBreaker[S]) awaitProof() { +func (breaker *ServiceBreaker[S, H]) awaitProof() { _, err := breaker.sub.Proof(breaker.ctx) if err != nil { return } - if err := breaker.Stop(breaker.ctx); err != nil && err != context.Canceled { + if err := breaker.Stop(breaker.ctx); err != nil && !errors.Is(err, context.Canceled) { log.Errorw("stopping service: %s", err.Error()) } } diff --git a/nodebuilder/fraud/mocks/api.go b/nodebuilder/fraud/mocks/api.go index ba88131695..399f8746e1 100644 --- a/nodebuilder/fraud/mocks/api.go +++ b/nodebuilder/fraud/mocks/api.go @@ -8,9 +8,10 @@ import ( context "context" reflect "reflect" + gomock "github.com/golang/mock/gomock" + fraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" fraud0 "github.com/celestiaorg/go-fraud" - gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/fraud/module.go b/nodebuilder/fraud/module.go index 718b702f84..bf353f63c6 100644 --- a/nodebuilder/fraud/module.go +++ b/nodebuilder/fraud/module.go @@ -6,27 +6,31 @@ import ( "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" ) var log = logging.Logger("module/fraud") func ConstructModule(tp node.Type) fx.Option { - baseComponent := fx.Provide(func(serv fraud.Service) fraud.Getter { - return serv - }) + baseComponent := fx.Options( + fx.Provide(fraudUnmarshaler), + fx.Provide(func(serv fraud.Service[*header.ExtendedHeader]) fraud.Getter[*header.ExtendedHeader] { + return serv + }), + ) switch tp { case node.Light: return fx.Module( "fraud", baseComponent, - fx.Provide(newFraudService(true)), + fx.Provide(newFraudServiceWithSync), ) case node.Full, node.Bridge: return fx.Module( "fraud", baseComponent, - fx.Provide(newFraudService(false)), + fx.Provide(newFraudServiceWithoutSync), ) default: panic("invalid node type") diff --git a/nodebuilder/fraud/service.go b/nodebuilder/fraud/service.go deleted file mode 100644 index 0337c375ef..0000000000 --- a/nodebuilder/fraud/service.go +++ /dev/null @@ -1,87 +0,0 @@ -package fraud - -import ( - "context" - "encoding/json" - - "github.com/celestiaorg/go-fraud" -) - -var _ Module = (*Service)(nil) - -// Service is an implementation of Module that uses fraud.Service as a backend. It is used to -// provide fraud proofs as a non-interface type to the API, and wrap fraud.Subscriber with a -// channel of Proofs. 
-type Service struct { - fraud.Service -} - -func (s *Service) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan Proof, error) { - subscription, err := s.Service.Subscribe(proofType) - if err != nil { - return nil, err - } - proofs := make(chan Proof) - go func() { - defer close(proofs) - for { - proof, err := subscription.Proof(ctx) - if err != nil { - if err != context.DeadlineExceeded && err != context.Canceled { - log.Errorw("fetching proof from subscription", "err", err) - } - return - } - select { - case <-ctx.Done(): - return - case proofs <- Proof{Proof: proof}: - } - } - }() - return proofs, nil -} - -func (s *Service) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { - originalProofs, err := s.Service.Get(ctx, proofType) - if err != nil { - return nil, err - } - proofs := make([]Proof, len(originalProofs)) - for i, originalProof := range originalProofs { - proofs[i].Proof = originalProof - } - return proofs, nil -} - -// Proof embeds the fraud.Proof interface type to provide a concrete type for JSON serialization. -type Proof struct { - fraud.Proof -} - -type fraudProofJSON struct { - ProofType fraud.ProofType `json:"proof_type"` - Data []byte `json:"data"` -} - -func (f *Proof) UnmarshalJSON(data []byte) error { - var fp fraudProofJSON - err := json.Unmarshal(data, &fp) - if err != nil { - return err - } - f.Proof, err = fraud.Unmarshal(fp.ProofType, fp.Data) - return err -} - -func (f *Proof) MarshalJSON() ([]byte, error) { - marshaledProof, err := f.MarshalBinary() - if err != nil { - return nil, err - } - fraudProof := &fraudProofJSON{ - ProofType: f.Type(), - Data: marshaledProof, - } - return json.Marshal(fraudProof) -} diff --git a/nodebuilder/fraud/unmarshaler.go b/nodebuilder/fraud/unmarshaler.go new file mode 100644 index 0000000000..d5e0461f01 --- /dev/null +++ b/nodebuilder/fraud/unmarshaler.go @@ -0,0 +1,32 @@ +package fraud + +import ( + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" +) + +var defaultProofUnmarshaler proofRegistry + +type proofRegistry struct{} + +func (pr proofRegistry) List() []fraud.ProofType { + return []fraud.ProofType{ + byzantine.BadEncoding, + } +} + +func (pr proofRegistry) Unmarshal(proofType fraud.ProofType, data []byte) (fraud.Proof[*header.ExtendedHeader], error) { + switch proofType { + case byzantine.BadEncoding: + befp := &byzantine.BadEncodingProof{} + err := befp.UnmarshalBinary(data) + if err != nil { + return nil, err + } + return befp, nil + default: + return nil, &fraud.ErrNoUnmarshaler{ProofType: proofType} + } +} diff --git a/nodebuilder/header/constructors.go b/nodebuilder/header/constructors.go index 7d70f0f5a8..267f0c30f7 100644 --- a/nodebuilder/header/constructors.go +++ b/nodebuilder/header/constructors.go @@ -15,21 +15,20 @@ import ( "github.com/celestiaorg/go-header/store" "github.com/celestiaorg/go-header/sync" - "github.com/celestiaorg/celestia-node/header" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/share/eds/byzantine" ) // newP2PExchange constructs a new Exchange for headers. 
-func newP2PExchange( +func newP2PExchange[H libhead.Header[H]]( lc fx.Lifecycle, bpeers modp2p.Bootstrappers, network modp2p.Network, host host.Host, conngater *conngater.BasicConnectionGater, cfg Config, -) (libhead.Exchange[*header.ExtendedHeader], error) { +) (libhead.Exchange[H], error) { peers, err := cfg.trustedPeers(bpeers) if err != nil { return nil, err @@ -39,7 +38,7 @@ func newP2PExchange( ids[index] = peer.ID host.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) } - exchange, err := p2p.NewExchange[*header.ExtendedHeader](host, ids, conngater, + exchange, err := p2p.NewExchange[H](host, ids, conngater, p2p.WithParams(cfg.Client), p2p.WithNetworkID[p2p.ClientParameters](network.String()), p2p.WithChainID(network.String()), @@ -60,14 +59,14 @@ func newP2PExchange( } // newSyncer constructs new Syncer for headers. -func newSyncer( - ex libhead.Exchange[*header.ExtendedHeader], - fservice libfraud.Service, - store InitStore, - sub libhead.Subscriber[*header.ExtendedHeader], +func newSyncer[H libhead.Header[H]]( + ex libhead.Exchange[H], + fservice libfraud.Service[H], + store InitStore[H], + sub libhead.Subscriber[H], cfg Config, -) (*sync.Syncer[*header.ExtendedHeader], *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], error) { - syncer, err := sync.NewSyncer[*header.ExtendedHeader](ex, store, sub, +) (*sync.Syncer[H], *modfraud.ServiceBreaker[*sync.Syncer[H], H], error) { + syncer, err := sync.NewSyncer[H](ex, store, sub, sync.WithParams(cfg.Syncer), sync.WithBlockTime(modp2p.BlockTime), ) @@ -75,7 +74,7 @@ func newSyncer( return nil, nil, err } - return syncer, &modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]]{ + return syncer, &modfraud.ServiceBreaker[*sync.Syncer[H], H]{ Service: syncer, FraudType: byzantine.BadEncoding, FraudServ: fservice, @@ -84,16 +83,16 @@ func newSyncer( // InitStore is a type representing initialized header store. // NOTE: It is needed to ensure that Store is always initialized before Syncer is started. -type InitStore libhead.Store[*header.ExtendedHeader] +type InitStore[H libhead.Header[H]] libhead.Store[H] // newInitStore constructs an initialized store -func newInitStore( +func newInitStore[H libhead.Header[H]]( lc fx.Lifecycle, cfg Config, net modp2p.Network, - s libhead.Store[*header.ExtendedHeader], - ex libhead.Exchange[*header.ExtendedHeader], -) (InitStore, error) { + s libhead.Store[H], + ex libhead.Exchange[H], +) (InitStore[H], error) { trustedHash, err := cfg.trustedHash(net) if err != nil { return nil, err diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index 02529a8ef9..7d6661ff5d 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -8,10 +8,11 @@ import ( context "context" reflect "reflect" + gomock "github.com/golang/mock/gomock" + header "github.com/celestiaorg/celestia-node/header" header0 "github.com/celestiaorg/go-header" sync "github.com/celestiaorg/go-header/sync" - gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. 
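Note: the DAS and header constructors in this patch now return a generic modfraud.ServiceBreaker parameterized by both the guarded service and the header type. Below is a minimal sketch of wiring such a breaker with the concrete *header.ExtendedHeader type; only the struct fields and the Start semantics come from this patch, the surrounding function and names are illustrative.

package example // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/celestiaorg/go-fraud"

	"github.com/celestiaorg/celestia-node/das"
	"github.com/celestiaorg/celestia-node/header"
	modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud"
	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
)

// startGuardedDASer wraps a DASer in a ServiceBreaker keyed to BadEncoding
// fraud proofs, mirroring the constructors in this patch.
func startGuardedDASer(
	ctx context.Context,
	daser *das.DASer,
	fraudServ fraud.Service[*header.ExtendedHeader],
) (*modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader], error) {
	breaker := &modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]{
		Service:   daser,
		FraudType: byzantine.BadEncoding,
		FraudServ: fraudServ,
	}
	// Start returns an error if a BadEncoding proof is already stored and,
	// once running, stops the wrapped service when such a proof arrives.
	if err := breaker.Start(ctx); err != nil {
		return nil, err
	}
	return breaker, nil
}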
diff --git a/nodebuilder/header/module.go b/nodebuilder/header/module.go index 77e7c5eb99..5e02e94fe1 100644 --- a/nodebuilder/header/module.go +++ b/nodebuilder/header/module.go @@ -22,7 +22,7 @@ import ( var log = logging.Logger("module/header") -func ConstructModule(tp node.Type, cfg *Config) fx.Option { +func ConstructModule[H libhead.Header[H]](tp node.Type, cfg *Config) fx.Option { // sanitize config values before constructing module cfgErr := cfg.Validate(tp) @@ -31,61 +31,63 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { fx.Error(cfgErr), fx.Provide(newHeaderService), fx.Provide(fx.Annotate( - func(ds datastore.Batching) (libhead.Store[*header.ExtendedHeader], error) { - return store.NewStore[*header.ExtendedHeader](ds, store.WithParams(cfg.Store)) + func(ds datastore.Batching) (libhead.Store[H], error) { + return store.NewStore[H](ds, store.WithParams(cfg.Store)) }, - fx.OnStart(func(ctx context.Context, store libhead.Store[*header.ExtendedHeader]) error { - return store.Start(ctx) + fx.OnStart(func(ctx context.Context, str libhead.Store[H]) error { + s := str.(*store.Store[H]) + return s.Start(ctx) }), - fx.OnStop(func(ctx context.Context, store libhead.Store[*header.ExtendedHeader]) error { - return store.Stop(ctx) + fx.OnStop(func(ctx context.Context, str libhead.Store[H]) error { + s := str.(*store.Store[H]) + return s.Stop(ctx) }), )), - fx.Provide(newInitStore), - fx.Provide(func(subscriber *p2p.Subscriber[*header.ExtendedHeader]) libhead.Subscriber[*header.ExtendedHeader] { + fx.Provide(newInitStore[H]), + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Subscriber[H] { return subscriber }), fx.Provide(fx.Annotate( - newSyncer, + newSyncer[H], fx.OnStart(func( ctx context.Context, - breaker *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], ) error { return breaker.Start(ctx) }), fx.OnStop(func( ctx context.Context, - breaker *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], ) error { return breaker.Stop(ctx) }), )), fx.Provide(fx.Annotate( - func(ps *pubsub.PubSub, network modp2p.Network) *p2p.Subscriber[*header.ExtendedHeader] { - return p2p.NewSubscriber[*header.ExtendedHeader](ps, header.MsgID, network.String()) + func(ps *pubsub.PubSub, network modp2p.Network) *p2p.Subscriber[H] { + return p2p.NewSubscriber[H](ps, header.MsgID, network.String()) }, - fx.OnStart(func(ctx context.Context, sub *p2p.Subscriber[*header.ExtendedHeader]) error { + fx.OnStart(func(ctx context.Context, sub *p2p.Subscriber[H]) error { return sub.Start(ctx) }), - fx.OnStop(func(ctx context.Context, sub *p2p.Subscriber[*header.ExtendedHeader]) error { + fx.OnStop(func(ctx context.Context, sub *p2p.Subscriber[H]) error { return sub.Stop(ctx) }), )), fx.Provide(fx.Annotate( func( host host.Host, - store libhead.Store[*header.ExtendedHeader], + store libhead.Store[H], network modp2p.Network, - ) (*p2p.ExchangeServer[*header.ExtendedHeader], error) { - return p2p.NewExchangeServer[*header.ExtendedHeader](host, store, + ) (*p2p.ExchangeServer[H], error) { + return p2p.NewExchangeServer[H](host, store, p2p.WithParams(cfg.Server), p2p.WithNetworkID[p2p.ServerParameters](network.String()), ) }, - fx.OnStart(func(ctx context.Context, server *p2p.ExchangeServer[*header.ExtendedHeader]) error { + fx.OnStart(func(ctx context.Context, server *p2p.ExchangeServer[H]) error { return server.Start(ctx) }), - fx.OnStop(func(ctx context.Context, server 
*p2p.ExchangeServer[*header.ExtendedHeader]) error { + fx.OnStop(func(ctx context.Context, server *p2p.ExchangeServer[H]) error { return server.Stop(ctx) }), )), @@ -96,13 +98,13 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { return fx.Module( "header", baseComponents, - fx.Provide(newP2PExchange), + fx.Provide(newP2PExchange[H]), ) case node.Bridge: return fx.Module( "header", baseComponents, - fx.Provide(func(subscriber *p2p.Subscriber[*header.ExtendedHeader]) libhead.Broadcaster[*header.ExtendedHeader] { + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Broadcaster[H] { return subscriber }), fx.Supply(header.MakeExtendedHeader), diff --git a/nodebuilder/header/module_test.go b/nodebuilder/header/module_test.go index 89293e4ab4..6a35e35284 100644 --- a/nodebuilder/header/module_test.go +++ b/nodebuilder/header/module_test.go @@ -38,7 +38,7 @@ func TestConstructModule_StoreParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), fx.Invoke( func(s libhead.Store[*header.ExtendedHeader]) { ss := s.(*store.Store[*header.ExtendedHeader]) @@ -72,10 +72,10 @@ func TestConstructModule_SyncerParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - fx.Provide(func() fraud.Service { + fx.Provide(func() fraud.Service[*header.ExtendedHeader] { return nil }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), fx.Invoke(func(s *sync.Syncer[*header.ExtendedHeader]) { syncer = s }), @@ -100,7 +100,7 @@ func TestConstructModule_ExchangeParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), fx.Provide(func(b datastore.Batching) (*conngater.BasicConnectionGater, error) { return conngater.NewBasicConnectionGater(b) }), diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index f410c04f04..2b208cb88d 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -65,9 +65,9 @@ func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.Exten switch { case err != nil: return nil, err - case uint64(head.Height()) == height: + case head.Height() == height: return head, nil - case uint64(head.Height())+1 < height: + case head.Height()+1 < height: return nil, fmt.Errorf("header: given height is from the future: "+ "networkHeight: %d, requestedHeight: %d", head.Height(), height) } @@ -78,10 +78,10 @@ func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.Exten switch { case err != nil: return nil, err - case uint64(head.Height()) == height: + case head.Height() == height: return head, nil // `+1` allows for one header network lag, e.g. 
user request header that is milliseconds away - case uint64(head.Height())+1 < height: + case head.Height()+1 < height: return nil, fmt.Errorf("header: syncing in progress: "+ "localHeadHeight: %d, requestedHeight: %d", head.Height(), height) default: diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 6493d3d51d..14d5ada87d 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -25,9 +25,9 @@ func TestGetByHeightHandlesError(t *testing.T) { }) } -type errorSyncer[H libhead.Header] struct{} +type errorSyncer[H libhead.Header[H]] struct{} -func (d *errorSyncer[H]) Head(context.Context) (H, error) { +func (d *errorSyncer[H]) Head(context.Context, ...libhead.HeadOption[H]) (H, error) { var zero H return zero, fmt.Errorf("dummy error") } diff --git a/nodebuilder/init.go b/nodebuilder/init.go index 2cabfc8abd..0593d88560 100644 --- a/nodebuilder/init.go +++ b/nodebuilder/init.go @@ -18,6 +18,9 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) +// PrintKeyringInfo whether to print keyring information during init. +var PrintKeyringInfo = true + // Init initializes the Node FileSystem Store for the given Node Type 'tp' in the directory under // 'path'. func Init(cfg Config, path string, tp node.Type) error { @@ -213,8 +216,10 @@ func generateKeys(cfg Config, ksPath string) error { if err != nil { return err } - fmt.Printf("\nNAME: %s\nADDRESS: %s\nMNEMONIC (save this somewhere safe!!!): \n%s\n\n", - keyInfo.Name, addr.String(), mn) + if PrintKeyringInfo { + fmt.Printf("\nNAME: %s\nADDRESS: %s\nMNEMONIC (save this somewhere safe!!!): \n%s\n\n", + keyInfo.Name, addr.String(), mn) + } return nil } diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 51edc26c72..3068113102 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -5,13 +5,14 @@ import ( "go.uber.org/fx" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/gateway" - "github.com/celestiaorg/celestia-node/nodebuilder/header" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" @@ -45,8 +46,8 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store fx.Supply(signer), // modules provided by the node p2p.ConstructModule(tp, &cfg.P2P), - state.ConstructModule(tp, &cfg.State), - header.ConstructModule(tp, &cfg.Header), + state.ConstructModule(tp, &cfg.State, &cfg.Core), + modhead.ConstructModule[*header.ExtendedHeader](tp, &cfg.Header), share.ConstructModule(tp, &cfg.Share), rpc.ConstructModule(tp, &cfg.RPC), gateway.ConstructModule(tp, &cfg.Gateway), diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 19760831cf..6d83e6c4c3 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -6,9 +6,8 @@ import ( "fmt" "strings" - "github.com/cristalhq/jwt" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-blockservice" - exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" @@ -49,7 
+48,6 @@ type Node struct { Network p2p.Network Bootstrappers p2p.Bootstrappers Config *Config - AdminSigner jwt.Signer // rpc components RPCServer *rpc.Server // not optional @@ -70,6 +68,7 @@ type Node struct { FraudServ fraud.Module // not optional BlobServ blob.Module // not optional DASer das.Module // not optional + AdminServ node.Module // not optional // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc diff --git a/nodebuilder/node/admin.go b/nodebuilder/node/admin.go index cad8a51361..c6c97625ef 100644 --- a/nodebuilder/node/admin.go +++ b/nodebuilder/node/admin.go @@ -2,21 +2,25 @@ package node import ( "context" - "fmt" + "github.com/cristalhq/jwt" "github.com/filecoin-project/go-jsonrpc/auth" logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/libs/authtoken" ) -const APIVersion = "v0.2.0" +const APIVersion = "v0.2.1" type module struct { - tp Type + tp Type + signer jwt.Signer } -func newModule(tp Type) Module { +func newModule(tp Type, signer jwt.Signer) Module { return &module{ - tp: tp, + tp: tp, + signer: signer, } } @@ -38,10 +42,10 @@ func (m *module) LogLevelSet(_ context.Context, name, level string) error { return logging.SetLogLevel(name, level) } -func (m *module) AuthVerify(context.Context, string) ([]auth.Permission, error) { - return []auth.Permission{}, fmt.Errorf("not implemented") +func (m *module) AuthVerify(_ context.Context, token string) ([]auth.Permission, error) { + return authtoken.ExtractSignedPermissions(m.signer, token) } -func (m *module) AuthNew(context.Context, []auth.Permission) ([]byte, error) { - return nil, fmt.Errorf("not implemented") +func (m *module) AuthNew(_ context.Context, permissions []auth.Permission) (string, error) { + return authtoken.NewSignedJWT(m.signer, permissions) } diff --git a/nodebuilder/node/buildInfo.go b/nodebuilder/node/buildInfo.go index 5f5bdde28e..53d8554d4d 100644 --- a/nodebuilder/node/buildInfo.go +++ b/nodebuilder/node/buildInfo.go @@ -1,9 +1,35 @@ package node -// BuildInfo stores all necessary information for the current build. +import ( + "fmt" + "runtime" +) + +var ( + buildTime string + lastCommit string + semanticVersion string + + systemVersion = fmt.Sprintf("%s/%s", runtime.GOARCH, runtime.GOOS) + golangVersion = runtime.Version() +) + +// BuildInfo represents all necessary information about current build. type BuildInfo struct { + BuildTime string LastCommit string SemanticVersion string SystemVersion string GolangVersion string } + +// GetBuildInfo returns information about current build. +func GetBuildInfo() *BuildInfo { + return &BuildInfo{ + buildTime, + lastCommit, + semanticVersion, + systemVersion, + golangVersion, + } +} diff --git a/nodebuilder/node/metrics.go b/nodebuilder/node/metrics.go index 625e8425e8..07c9a5fc0f 100644 --- a/nodebuilder/node/metrics.go +++ b/nodebuilder/node/metrics.go @@ -4,11 +4,12 @@ import ( "context" "time" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" ) -var meter = global.MeterProvider().Meter("node") +var meter = otel.Meter("node") var ( timeStarted time.Time @@ -17,37 +18,55 @@ var ( // WithMetrics registers node metrics. func WithMetrics() error { - nodeStartTS, err := meter. - AsyncFloat64(). 
- Gauge( - "node_start_ts", - instrument.WithDescription("timestamp when the node was started"), - ) + nodeStartTS, err := meter.Int64ObservableGauge( + "node_start_ts", + metric.WithDescription("timestamp when the node was started"), + ) if err != nil { return err } - totalNodeRunTime, err := meter. - AsyncFloat64(). - Counter( - "node_runtime_counter_in_seconds", - instrument.WithDescription("total time the node has been running"), - ) + totalNodeRunTime, err := meter.Float64ObservableCounter( + "node_runtime_counter_in_seconds", + metric.WithDescription("total time the node has been running"), + ) if err != nil { return err } - return meter.RegisterCallback( - []instrument.Asynchronous{nodeStartTS, totalNodeRunTime}, - func(ctx context.Context) { - if !nodeStarted { - // Observe node start timestamp - timeStarted = time.Now() - nodeStartTS.Observe(ctx, float64(timeStarted.Unix())) - nodeStarted = true - } - - totalNodeRunTime.Observe(ctx, time.Since(timeStarted).Seconds()) - }, + buildInfoGauge, err := meter.Float64ObservableGauge( + "build_info", + metric.WithDescription("Celestia Node build information"), ) + if err != nil { + return err + } + + callback := func(ctx context.Context, observer metric.Observer) error { + if !nodeStarted { + // Observe node start timestamp + timeStarted = time.Now() + observer.ObserveInt64(nodeStartTS, timeStarted.Unix()) + nodeStarted = true + } + + observer.ObserveFloat64(totalNodeRunTime, time.Since(timeStarted).Seconds()) + + // Observe build info with labels + labels := metric.WithAttributes( + attribute.String("build_time", buildTime), + attribute.String("last_commit", lastCommit), + attribute.String("semantic_version", semanticVersion), + attribute.String("system_version", systemVersion), + attribute.String("golang_version", golangVersion), + ) + + observer.ObserveFloat64(buildInfoGauge, 1, labels) + + return nil + } + + _, err = meter.RegisterCallback(callback, nodeStartTS, totalNodeRunTime, buildInfoGauge) + + return err } diff --git a/nodebuilder/node/mocks/api.go b/nodebuilder/node/mocks/api.go index 98df713429..14357316dc 100644 --- a/nodebuilder/node/mocks/api.go +++ b/nodebuilder/node/mocks/api.go @@ -8,9 +8,10 @@ import ( context "context" reflect "reflect" - node "github.com/celestiaorg/celestia-node/nodebuilder/node" auth "github.com/filecoin-project/go-jsonrpc/auth" gomock "github.com/golang/mock/gomock" + + node "github.com/celestiaorg/celestia-node/nodebuilder/node" ) // MockModule is a mock of Module interface. @@ -37,10 +38,10 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // AuthNew mocks base method. 
-func (m *MockModule) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { +func (m *MockModule) AuthNew(arg0 context.Context, arg1 []auth.Permission) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) - ret0, _ := ret[0].([]byte) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/nodebuilder/node/module.go b/nodebuilder/node/module.go index e7ae88182e..5abfad8e5f 100644 --- a/nodebuilder/node/module.go +++ b/nodebuilder/node/module.go @@ -9,7 +9,7 @@ func ConstructModule(tp Type) fx.Option { return fx.Module( "node", fx.Provide(func(secret jwt.Signer) Module { - return newModule(tp) + return newModule(tp, secret) }), fx.Provide(secret), ) diff --git a/nodebuilder/node/node.go b/nodebuilder/node/node.go index c33f73ef58..18ce93615b 100644 --- a/nodebuilder/node/node.go +++ b/nodebuilder/node/node.go @@ -20,7 +20,7 @@ type Module interface { // AuthVerify returns the permissions assigned to the given token. AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) // AuthNew signs and returns a new token with the given permissions. - AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) + AuthNew(ctx context.Context, perms []auth.Permission) (string, error) } var _ Module = (*API)(nil) @@ -30,7 +30,7 @@ type API struct { Info func(context.Context) (Info, error) `perm:"admin"` LogLevelSet func(ctx context.Context, name, level string) error `perm:"admin"` AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"admin"` - AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"` + AuthNew func(ctx context.Context, perms []auth.Permission) (string, error) `perm:"admin"` } } @@ -46,6 +46,6 @@ func (api *API) AuthVerify(ctx context.Context, token string) ([]auth.Permission return api.Internal.AuthVerify(ctx, token) } -func (api *API) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { +func (api *API) AuthNew(ctx context.Context, perms []auth.Permission) (string, error) { return api.Internal.AuthNew(ctx, perms) } diff --git a/nodebuilder/node_light_test.go b/nodebuilder/node_light_test.go index a7a70d0622..7138a23c9e 100644 --- a/nodebuilder/node_light_test.go +++ b/nodebuilder/node_light_test.go @@ -1,6 +1,7 @@ package nodebuilder import ( + "context" "crypto/rand" "testing" @@ -11,6 +12,7 @@ import ( nodebuilder "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/state" ) func TestNewLightWithP2PKey(t *testing.T) { @@ -44,3 +46,11 @@ func TestLight_WithNetwork(t *testing.T) { require.NotNil(t, node) assert.Equal(t, p2p.Private, node.Network) } + +// TestLight_WithStubbedCoreAccessor ensures that a node started without +// a core connection will return a stubbed StateModule. 
+func TestLight_WithStubbedCoreAccessor(t *testing.T) { + node := TestNode(t, nodebuilder.Light) + _, err := node.StateServ.Balance(context.Background()) + assert.ErrorIs(t, state.ErrNoStateAccess, err) +} diff --git a/nodebuilder/node_test.go b/nodebuilder/node_test.go index bd5d1da811..3fc3f4f02a 100644 --- a/nodebuilder/node_test.go +++ b/nodebuilder/node_test.go @@ -34,7 +34,7 @@ func TestLifecycle(t *testing.T) { require.NotNil(t, node.Host) require.NotNil(t, node.HeaderServ) require.NotNil(t, node.StateServ) - require.NotNil(t, node.AdminSigner) + require.NotNil(t, node.AdminServ) require.Equal(t, tt.tp, node.Type) ctx, cancel := context.WithCancel(context.Background()) @@ -43,14 +43,8 @@ func TestLifecycle(t *testing.T) { err := node.Start(ctx) require.NoError(t, err) - // ensure the state service is running - require.False(t, node.StateServ.IsStopped(ctx)) - err = node.Stop(ctx) require.NoError(t, err) - - // ensure the state service is stopped - require.True(t, node.StateServ.IsStopped(ctx)) }) } } @@ -81,7 +75,6 @@ func TestLifecycle_WithMetrics(t *testing.T) { otlpmetrichttp.WithInsecure(), }, tt.tp, - node.BuildInfo{}, ), ) require.NotNil(t, node) @@ -97,14 +90,8 @@ func TestLifecycle_WithMetrics(t *testing.T) { err := node.Start(ctx) require.NoError(t, err) - // ensure the state service is running - require.False(t, node.StateServ.IsStopped(ctx)) - err = node.Stop(ctx) require.NoError(t, err) - - // ensure the state service is stopped - require.True(t, node.StateServ.IsStopped(ctx)) }) } } diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go index 3a99dd2c7d..19f98609fc 100644 --- a/nodebuilder/p2p/bitswap.go +++ b/nodebuilder/p2p/bitswap.go @@ -4,11 +4,11 @@ import ( "context" "fmt" + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - "github.com/ipfs/go-libipfs/bitswap" - "github.com/ipfs/go-libipfs/bitswap/network" routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" hst "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/protocol" @@ -48,7 +48,7 @@ func blockstoreFromDatastore(ctx context.Context, ds datastore.Batching) (blocks blockstore.CacheOpts{ HasBloomFilterSize: defaultBloomFilterSize, HasBloomFilterHashes: defaultBloomFilterHashes, - HasARCCacheSize: defaultARCCacheSize, + HasTwoQueueCacheSize: defaultARCCacheSize, }, ) } @@ -58,7 +58,7 @@ func blockstoreFromEDSStore(ctx context.Context, store *eds.Store) (blockstore.B ctx, store.Blockstore(), blockstore.CacheOpts{ - HasARCCacheSize: defaultARCCacheSize, + HasTwoQueueCacheSize: defaultARCCacheSize, }, ) } diff --git a/nodebuilder/p2p/bootstrap.go b/nodebuilder/p2p/bootstrap.go index c534aa4fba..0718d8cf59 100644 --- a/nodebuilder/p2p/bootstrap.go +++ b/nodebuilder/p2p/bootstrap.go @@ -38,10 +38,10 @@ func bootstrappersFor(net Network) ([]string, error) { // NOTE: Every time we add a new long-running network, its bootstrap peers have to be added here. 
var bootstrapList = map[Network][]string{ Arabica: { - "/dns4/da-bridge-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWDXkXARv79Dtn5xrGBgJePtCzCsEwWR7eGWnx9ZCyUyD6", - "/dns4/da-bridge-arabica-8-2.celestia-arabica.com/tcp/2121/p2p/12D3KooWPu8qKmmNgYFMBsTkLBa1m3D9Cy9ReCAoQLqxEn9MHD1i", - "/dns4/da-full-1-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWEmeFodzypdTBTcw8Yub6WZRT4h1UgFtwCwwq6wS5Dtqm", - "/dns4/da-full-2-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWCs3wFmqwPn1u8pNU4BGsvLsob1ShTzvps8qEtTRuuuK5", + "/dns4/da-bridge.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWM3e9MWtyc8GkP8QRt74Riu17QuhGfZMytB2vq5NwkWAu", + "/dns4/da-bridge-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWKj8mcdiBGxQRe1jqhaMnh2tGoC3rPDmr5UH2q8H4WA9M", + "/dns4/da-full-1.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWBWkgmN7kmJSFovVrCjkeG47FkLGq7yEwJ2kEqNKCsBYk", + "/dns4/da-full-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWRByRF67a2kVM2j4MP5Po3jgTw7H2iL2Spu8aUwPkrRfP", }, Mocha: { "/dns4/bootstr-mocha-1.celestia-mocha.com/tcp/2121/p2p/12D3KooWDRSJMbH3PS4dRDa11H7Tk615aqTUgkeEKz4pwd4sS6fN", diff --git a/nodebuilder/p2p/genesis.go b/nodebuilder/p2p/genesis.go index 0a36dc54cc..31ea0b0ae9 100644 --- a/nodebuilder/p2p/genesis.go +++ b/nodebuilder/p2p/genesis.go @@ -23,8 +23,8 @@ func GenesisFor(net Network) (string, error) { // NOTE: Every time we add a new long-running network, its genesis hash has to be added here. var genesisList = map[Network]string{ - Arabica: "E5D620B5BE7873222DCD83464C285FD0F215C209393E7481F9A5979280AD6CA2", - Mocha: "1181AF8EAE5DDF3CBBFF3BF3CC44C5B795DF5094F5A0CC0AE52921ECCA0AF3C8", + Arabica: "5904E55478BA4B3002EE885621E007A2A6A2399662841912219AECD5D5CBE393", + Mocha: "79A97034D569C4199A867439B1B7B77D4E1E1D9697212755E1CE6D920CDBB541", BlockspaceRace: "1A8491A72F73929680DAA6C93E3B593579261B2E76536BFA4F5B97D6FE76E088", Private: "", } diff --git a/nodebuilder/p2p/ipld.go b/nodebuilder/p2p/ipld.go index 6278538825..ad32cc39f5 100644 --- a/nodebuilder/p2p/ipld.go +++ b/nodebuilder/p2p/ipld.go @@ -1,9 +1,9 @@ package p2p import ( + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-blockservice" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" ) // blockService constructs IPFS's BlockService for fetching arbitrary Merkle structures. diff --git a/nodebuilder/p2p/metrics.go b/nodebuilder/p2p/metrics.go index 3e6caf08ca..095c30d9b7 100644 --- a/nodebuilder/p2p/metrics.go +++ b/nodebuilder/p2p/metrics.go @@ -6,7 +6,6 @@ import ( "net/http" "time" - rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/fx" @@ -28,8 +27,6 @@ const ( // prometheusMetrics option sets up native libp2p metrics up func prometheusMetrics(lifecycle fx.Lifecycle, registerer prometheus.Registerer) error { - rcmgrObs.MustRegisterWith(registerer) - registry := registerer.(*prometheus.Registry) mux := http.NewServeMux() diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go index dd04cd377c..29e0218bb2 100644 --- a/nodebuilder/p2p/network.go +++ b/nodebuilder/p2p/network.go @@ -12,9 +12,9 @@ const ( // DefaultNetwork is the default network of the current build. DefaultNetwork = Mocha // Arabica testnet. See: celestiaorg/networks. - Arabica Network = "arabica-8" + Arabica Network = "arabica-10" // Mocha testnet. See: celestiaorg/networks. 
- Mocha Network = "mocha-2" + Mocha Network = "mocha-3" // BlockspaceRace testnet. See: https://docs.celestia.org/nodes/blockspace-race/. BlockspaceRace Network = "blockspacerace-0" // Private can be used to set up any private network, including local testing setups. diff --git a/nodebuilder/p2p/pubsub.go b/nodebuilder/p2p/pubsub.go index 0061ab9eea..13d812e3ce 100644 --- a/nodebuilder/p2p/pubsub.go +++ b/nodebuilder/p2p/pubsub.go @@ -18,6 +18,8 @@ import ( "github.com/celestiaorg/go-fraud" "github.com/celestiaorg/go-fraud/fraudserv" headp2p "github.com/celestiaorg/go-header/p2p" + + "github.com/celestiaorg/celestia-node/header" ) func init() { @@ -66,7 +68,7 @@ func pubSub(cfg Config, params pubSubParams) (*pubsub.PubSub, error) { // * https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring // * lotus // * prysm - topicScores := topicScoreParams(params.Network) + topicScores := topicScoreParams(params) peerScores, err := peerScoreParams(params.Bootstrappers, cfg) if err != nil { return nil, err @@ -105,15 +107,16 @@ type pubSubParams struct { Host hst.Host Bootstrappers Bootstrappers Network Network + Unmarshaler fraud.ProofUnmarshaler[*header.ExtendedHeader] } -func topicScoreParams(network Network) map[string]*pubsub.TopicScoreParams { +func topicScoreParams(params pubSubParams) map[string]*pubsub.TopicScoreParams { mp := map[string]*pubsub.TopicScoreParams{ - headp2p.PubsubTopicID(network.String()): &headp2p.GossibSubScore, + headp2p.PubsubTopicID(params.Network.String()): &headp2p.GossibSubScore, } - for _, pt := range fraud.Registered() { - mp[fraudserv.PubsubTopicID(pt.String(), network.String())] = &fraudserv.GossibSubScore + for _, pt := range params.Unmarshaler.List() { + mp[fraudserv.PubsubTopicID(pt.String(), params.Network.String())] = &fraudserv.GossibSubScore } return mp diff --git a/nodebuilder/p2p/resources.go b/nodebuilder/p2p/resources.go index 371747463a..6e24e1e542 100644 --- a/nodebuilder/p2p/resources.go +++ b/nodebuilder/p2p/resources.go @@ -6,7 +6,6 @@ import ( "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/network" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" "go.uber.org/fx" @@ -60,7 +59,7 @@ func allowList(ctx context.Context, cfg Config, bootstrappers Bootstrappers) (rc } func traceReporter() rcmgr.Option { - str, err := rcmgrObs.NewStatsTraceReporter() + str, err := rcmgr.NewStatsTraceReporter() if err != nil { panic(err) // err is always nil as per sources } diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go index bea3c78ad2..d56125209c 100644 --- a/nodebuilder/settings.go +++ b/nodebuilder/settings.go @@ -7,15 +7,22 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/pyroscope-io/client/pyroscope" + otelpyroscope "github.com/pyroscope-io/otel-profiling-go" + "go.opentelemetry.io/contrib/instrumentation/runtime" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + sdk "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.11.0" + 
"go.opentelemetry.io/otel/trace" "go.uber.org/fx" "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/das" modheader "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" @@ -24,6 +31,8 @@ import ( "github.com/celestiaorg/celestia-node/state" ) +const defaultMetricsCollectInterval = 10 * time.Second + // WithNetwork specifies the Network to which the Node should connect to. // WARNING: Use this option with caution and never run the Node with different networks over the // same persisted Store. @@ -62,13 +71,17 @@ func WithPyroscope(endpoint string, nodeType node.Type) fx.Option { } // WithMetrics enables metrics exporting for the node. -func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type, buildInfo node.BuildInfo) fx.Option { +func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type) fx.Option { baseComponents := fx.Options( fx.Supply(metricOpts), - fx.Supply(buildInfo), fx.Invoke(initializeMetrics), - fx.Invoke(state.WithMetrics), - fx.Invoke(fraud.WithMetrics), + fx.Invoke(func(ca *state.CoreAccessor) { + if ca == nil { + return + } + state.WithMetrics(ca) + }), + fx.Invoke(fraud.WithMetrics[*header.ExtendedHeader]), fx.Invoke(node.WithMetrics), fx.Invoke(modheader.WithMetrics), fx.Invoke(share.WithDiscoveryMetrics), @@ -86,6 +99,7 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type, buildIn case node.Full: opts = fx.Options( baseComponents, + fx.Invoke(share.WithStoreMetrics), fx.Invoke(share.WithShrexServerMetrics), samplingMetrics, ) @@ -97,6 +111,7 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type, buildIn case node.Bridge: opts = fx.Options( baseComponents, + fx.Invoke(share.WithStoreMetrics), fx.Invoke(share.WithShrexServerMetrics), ) default: @@ -105,13 +120,55 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type, buildIn return opts } +func WithTraces(opts []otlptracehttp.Option, pyroOpts []otelpyroscope.Option) fx.Option { + options := fx.Options( + fx.Supply(opts), + fx.Supply(pyroOpts), + fx.Invoke(initializeTraces), + ) + return options +} + +func initializeTraces( + ctx context.Context, + nodeType node.Type, + peerID peer.ID, + network p2p.Network, + opts []otlptracehttp.Option, + pyroOpts []otelpyroscope.Option, +) error { + client := otlptracehttp.NewClient(opts...) + exporter, err := otlptrace.New(ctx, client) + if err != nil { + return fmt.Errorf("creating OTLP trace exporter: %w", err) + } + + var tp trace.TracerProvider + tp = tracesdk.NewTracerProvider( + tracesdk.WithSampler(tracesdk.AlwaysSample()), + // Always be sure to batch in production. + tracesdk.WithBatcher(exporter), + // Record information about this application in a Resource. + tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNamespaceKey.String(nodeType.String()), + semconv.ServiceNameKey.String(fmt.Sprintf("%s/%s", network.String(), peerID.String()))), + )) + + if len(pyroOpts) > 0 { + tp = otelpyroscope.NewTracerProvider(tp, pyroOpts...) + } + otel.SetTracerProvider(tp) + return nil +} + // initializeMetrics initializes the global meter provider. func initializeMetrics( ctx context.Context, lc fx.Lifecycle, peerID peer.ID, nodeType node.Type, - buildInfo node.BuildInfo, + network p2p.Network, opts []otlpmetrichttp.Option, ) error { exp, err := otlpmetrichttp.New(ctx, opts...) 
@@ -119,19 +176,34 @@ func initializeMetrics( return err } - provider := metric.NewMeterProvider( - metric.WithReader(metric.NewPeriodicReader(exp, metric.WithTimeout(2*time.Second))), - metric.WithResource(resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNamespaceKey.String(fmt.Sprintf("Celestia-%s", nodeType.String())), - semconv.ServiceNameKey.String(fmt.Sprintf("semver-%s", buildInfo.SemanticVersion)), - semconv.ServiceInstanceIDKey.String(peerID.String()), - ))) + provider := sdk.NewMeterProvider( + sdk.WithReader( + sdk.NewPeriodicReader(exp, + sdk.WithTimeout(defaultMetricsCollectInterval), + sdk.WithInterval(defaultMetricsCollectInterval))), + sdk.WithResource( + resource.NewWithAttributes( + semconv.SchemaURL, + // ServiceNamespaceKey and ServiceNameKey will be concatenated into single attribute with key: + // "job" and value: "%service.namespace%/%service.name%" + semconv.ServiceNamespaceKey.String(network.String()), + semconv.ServiceNameKey.String(nodeType.String()), + // ServiceInstanceIDKey will be exported with key: "instance" + semconv.ServiceInstanceIDKey.String(peerID.String()), + ))) + + err = runtime.Start( + runtime.WithMinimumReadMemStatsInterval(defaultMetricsCollectInterval), + runtime.WithMeterProvider(provider)) + if err != nil { + return fmt.Errorf("start runtime metrics: %w", err) + } + lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { return provider.Shutdown(ctx) }, }) - global.SetMeterProvider(provider) + otel.SetMeterProvider(provider) return nil } diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 1913b3d576..a1b7e39713 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/filecoin-project/dagstore" + "github.com/ipfs/go-blockservice" "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" @@ -18,6 +19,7 @@ import ( "github.com/celestiaorg/celestia-node/share/availability/light" "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" ) @@ -51,15 +53,27 @@ func newModule(getter share.Getter, avail share.Availability) Module { // ensureEmptyCARExists adds an empty EDS to the provided EDS store. func ensureEmptyCARExists(ctx context.Context, store *eds.Store) error { emptyEDS := share.EmptyExtendedDataSquare() - emptyDAH := da.NewDataAvailabilityHeader(emptyEDS) + emptyDAH, err := da.NewDataAvailabilityHeader(emptyEDS) + if err != nil { + return err + } - err := store.Put(ctx, emptyDAH.Hash(), emptyEDS) + err = store.Put(ctx, emptyDAH.Hash(), emptyEDS) if errors.Is(err, dagstore.ErrShardExists) { return nil } return err } +// ensureEmptyEDSInBS checks if the given DAG contains an empty block data square. +// If it does not, it stores an empty block. This optimization exists to prevent +// redundant storing of empty block data so that it is only stored once and returned +// upon request for a block with an empty data square. 
+func ensureEmptyEDSInBS(ctx context.Context, bServ blockservice.BlockService) error { + _, err := ipld.AddShares(ctx, share.EmptyBlockShares(), bServ) + return err +} + func lightGetter( shrexGetter *getters.ShrexGetter, ipldGetter *getters.IPLDGetter, diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index 586c6dab4b..66baa23301 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -8,11 +8,11 @@ import ( context "context" reflect "reflect" + gomock "github.com/golang/mock/gomock" + da "github.com/celestiaorg/celestia-app/pkg/da" share "github.com/celestiaorg/celestia-node/share" - namespace "github.com/celestiaorg/nmt/namespace" rsmt2d "github.com/celestiaorg/rsmt2d" - gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. @@ -69,7 +69,7 @@ func (mr *MockModuleMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) * } // GetSharesByNamespace mocks base method. -func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2 namespace.ID) (share.NamespacedShares, error) { +func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2 share.Namespace) (share.NamespacedShares, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) ret0, _ := ret[0].(share.NamespacedShares) diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 6dfb155bb0..b924cf8167 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -176,7 +176,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option } }), shrexGetterComponents, - fx.Invoke(share.EnsureEmptySquareExists), + fx.Invoke(ensureEmptyEDSInBS), fx.Provide(getters.NewIPLDGetter), fx.Provide(lightGetter), // shrexsub broadcaster stub for daser diff --git a/nodebuilder/share/opts.go b/nodebuilder/share/opts.go index 20ba0ce58c..e236847f41 100644 --- a/nodebuilder/share/opts.go +++ b/nodebuilder/share/opts.go @@ -1,6 +1,7 @@ package share import ( + "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" @@ -41,3 +42,7 @@ func WithShrexServerMetrics(edsServer *shrexeds.Server, ndServer *shrexnd.Server func WithShrexGetterMetrics(sg *getters.ShrexGetter) error { return sg.WithMetrics() } + +func WithStoreMetrics(s *eds.Store) error { + return s.WithMetrics() +} diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index 0c703f9a15..def3bb4d0f 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -3,7 +3,6 @@ package share import ( "context" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -40,7 +39,7 @@ type Module interface { GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.ExtendedDataSquare, error) // GetSharesByNamespace gets all shares from an EDS within the given namespace. // Shares are returned in a row-by-row order if the namespace spans multiple rows. - GetSharesByNamespace(ctx context.Context, root *share.Root, namespace namespace.ID) (share.NamespacedShares, error) + GetSharesByNamespace(ctx context.Context, root *share.Root, namespace share.Namespace) (share.NamespacedShares, error) } // API is a wrapper around Module for the RPC. 
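The share.Module hunks above replace nmt's namespace.ID with share.Namespace in GetSharesByNamespace. Below is a minimal caller sketch against the new signature, assuming this changeset's share package; the package name, the getSharesFn alias, and the fetchAndVerify helper are hypothetical illustrations and not part of the diff.

package shareexample

import (
	"context"

	"github.com/celestiaorg/celestia-node/share"
)

// getSharesFn mirrors the updated Module.GetSharesByNamespace signature
// (share.Namespace instead of the old nmt namespace.ID).
type getSharesFn func(context.Context, *share.Root, share.Namespace) (share.NamespacedShares, error)

// fetchAndVerify (hypothetical helper) requests all shares for a namespace,
// verifies the returned rows against the DAH root, and flattens them into a
// single slice, as the updated tests in this diff do.
func fetchAndVerify(
	ctx context.Context,
	getShares getSharesFn,
	root *share.Root,
	ns share.Namespace,
) ([]share.Share, error) {
	nsShares, err := getShares(ctx, root, ns)
	if err != nil {
		return nil, err
	}
	if err := nsShares.Verify(root, ns); err != nil {
		return nil, err
	}
	return nsShares.Flatten(), nil
}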
@@ -61,7 +60,7 @@ type API struct { GetSharesByNamespace func( ctx context.Context, root *share.Root, - namespace namespace.ID, + namespace share.Namespace, ) (share.NamespacedShares, error) `perm:"public"` } } @@ -85,7 +84,7 @@ func (api *API) GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.ExtendedD func (api *API) GetSharesByNamespace( ctx context.Context, root *share.Root, - namespace namespace.ID, + namespace share.Namespace, ) (share.NamespacedShares, error) { return api.Internal.GetSharesByNamespace(ctx, root, namespace) } diff --git a/nodebuilder/share/share_test.go b/nodebuilder/share/share_test.go index 388fd9af07..7c440a6dbf 100644 --- a/nodebuilder/share/share_test.go +++ b/nodebuilder/share/share_test.go @@ -27,7 +27,8 @@ func Test_EmptyCARExists(t *testing.T) { require.NoError(t, err) eds := share.EmptyExtendedDataSquare() - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) // add empty EDS to store err = ensureEmptyCARExists(ctx, edsStore) diff --git a/nodebuilder/state/core.go b/nodebuilder/state/core.go index a3eb7f7b6d..f8f8508540 100644 --- a/nodebuilder/state/core.go +++ b/nodebuilder/state/core.go @@ -18,11 +18,11 @@ func coreAccessor( corecfg core.Config, signer *apptypes.KeyringSigner, sync *sync.Syncer[*header.ExtendedHeader], - fraudServ libfraud.Service, -) (*state.CoreAccessor, *modfraud.ServiceBreaker[*state.CoreAccessor]) { + fraudServ libfraud.Service[*header.ExtendedHeader], +) (*state.CoreAccessor, Module, *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) { ca := state.NewCoreAccessor(signer, sync, corecfg.IP, corecfg.RPCPort, corecfg.GRPCPort) - return ca, &modfraud.ServiceBreaker[*state.CoreAccessor]{ + return ca, ca, &modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]{ Service: ca, FraudType: byzantine.BadEncoding, FraudServ: fraudServ, diff --git a/nodebuilder/state/mocks/api.go b/nodebuilder/state/mocks/api.go index dbd1d5dabe..754920dee2 100644 --- a/nodebuilder/state/mocks/api.go +++ b/nodebuilder/state/mocks/api.go @@ -9,12 +9,13 @@ import ( reflect "reflect" math "cosmossdk.io/math" - blob "github.com/celestiaorg/celestia-node/blob" - state "github.com/celestiaorg/celestia-node/state" types "github.com/cosmos/cosmos-sdk/types" types0 "github.com/cosmos/cosmos-sdk/x/staking/types" gomock "github.com/golang/mock/gomock" types1 "github.com/tendermint/tendermint/types" + + blob "github.com/celestiaorg/celestia-node/blob" + state "github.com/celestiaorg/celestia-node/state" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/state/module.go b/nodebuilder/state/module.go index 24305dabe1..733419a918 100644 --- a/nodebuilder/state/module.go +++ b/nodebuilder/state/module.go @@ -6,6 +6,9 @@ import ( logging "github.com/ipfs/go-log/v2" "go.uber.org/fx" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/fxutil" + "github.com/celestiaorg/celestia-node/nodebuilder/core" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/state" @@ -15,25 +18,26 @@ var log = logging.Logger("module/state") // ConstructModule provides all components necessary to construct the // state service. 
-func ConstructModule(tp node.Type, cfg *Config) fx.Option { +func ConstructModule(tp node.Type, cfg *Config, coreCfg *core.Config) fx.Option { // sanitize config values before constructing module cfgErr := cfg.Validate() baseComponents := fx.Options( fx.Supply(*cfg), fx.Error(cfgErr), - fx.Provide(fx.Annotate( + fxutil.ProvideIf(coreCfg.IsEndpointConfigured(), fx.Annotate( coreAccessor, - fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*state.CoreAccessor]) error { + fx.OnStart(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { return breaker.Start(ctx) }), - fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*state.CoreAccessor]) error { + fx.OnStop(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { return breaker.Stop(ctx) }), )), - // the module is needed for the handler - fx.Provide(func(ca *state.CoreAccessor) Module { - return ca + fxutil.ProvideIf(!coreCfg.IsEndpointConfigured(), func() (*state.CoreAccessor, Module) { + return nil, &stubbedStateModule{} }), ) diff --git a/nodebuilder/state/stub.go b/nodebuilder/state/stub.go new file mode 100644 index 0000000000..94326fed5e --- /dev/null +++ b/nodebuilder/state/stub.go @@ -0,0 +1,116 @@ +package state + +import ( + "context" + "errors" + + "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/state" +) + +var ErrNoStateAccess = errors.New("node is running without state access") + +// stubbedStateModule provides a stub for the state module to return +// errors when state endpoints are accessed without a running connection +// to a core endpoint. +type stubbedStateModule struct{} + +func (s stubbedStateModule) IsStopped(context.Context) bool { + return true +} + +func (s stubbedStateModule) AccountAddress(context.Context) (state.Address, error) { + return state.Address{}, ErrNoStateAccess +} + +func (s stubbedStateModule) Balance(context.Context) (*state.Balance, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) BalanceForAddress( + context.Context, + state.Address, +) (*state.Balance, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Transfer( + _ context.Context, + _ state.AccAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) SubmitTx(context.Context, state.Tx) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) SubmitPayForBlob( + context.Context, + state.Int, + uint64, + []*blob.Blob, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) CancelUnbondingDelegation( + _ context.Context, + _ state.ValAddress, + _, _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) BeginRedelegate( + _ context.Context, + _, _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Undelegate( + _ context.Context, + _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Delegate( + _ context.Context, + _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s 
stubbedStateModule) QueryDelegation( + context.Context, + state.ValAddress, +) (*types.QueryDelegationResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) QueryUnbonding( + context.Context, + state.ValAddress, +) (*types.QueryUnbondingDelegationResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) QueryRedelegations( + _ context.Context, + _, _ state.ValAddress, +) (*types.QueryRedelegationsResponse, error) { + return nil, ErrNoStateAccess +} diff --git a/nodebuilder/store.go b/nodebuilder/store.go index 046dd41bbd..6d313893b1 100644 --- a/nodebuilder/store.go +++ b/nodebuilder/store.go @@ -5,13 +5,14 @@ import ( "fmt" "path/filepath" "sync" + "time" "github.com/cosmos/cosmos-sdk/crypto/keyring" - "github.com/dgraph-io/badger/v2/options" "github.com/ipfs/go-datastore" - dsbadger "github.com/ipfs/go-ds-badger2" "github.com/mitchellh/go-homedir" + dsbadger "github.com/celestiaorg/go-ds-badger4" + "github.com/celestiaorg/celestia-node/libs/fslock" "github.com/celestiaorg/celestia-node/libs/keystore" ) @@ -104,69 +105,47 @@ func (f *fsStore) PutConfig(cfg *Config) error { } func (f *fsStore) Keystore() (_ keystore.Keystore, err error) { - f.lock.RLock() - defer f.lock.RUnlock() if f.keys == nil { return nil, fmt.Errorf("node: no Keystore found") } return f.keys, nil } -func (f *fsStore) Datastore() (_ datastore.Batching, err error) { - f.lock.RLock() +func (f *fsStore) Datastore() (datastore.Batching, error) { + f.dataMu.Lock() + defer f.dataMu.Unlock() if f.data != nil { - f.lock.RUnlock() return f.data, nil } - f.lock.RUnlock() - - f.lock.Lock() - defer f.lock.Unlock() opts := dsbadger.DefaultOptions // this should be copied + opts.GcInterval = time.Minute * 10 - // Badger sets ValueThreshold to 1K by default and this makes shares being stored in LSM tree - // instead of the value log, so we change the value to be lower than share size, - // so shares are store in value log. For value log and LSM definitions - opts.ValueThreshold = 128 - // We always write unique values to Badger transaction so there is no need to detect conflicts. - opts.DetectConflicts = false - // Use MemoryMap for better performance - opts.ValueLogLoadingMode = options.MemoryMap - opts.TableLoadingMode = options.MemoryMap - // Truncate set to true will truncate corrupted data on start if there is any. - // If we don't truncate, the node will refuse to start and will beg for recovering, etc. - // If we truncate, the node will start with any uncorrupted data and reliably sync again what was - // corrupted in most cases. 
- opts.Truncate = true - // MaxTableSize defines in memory and on disk size of LSM tree - // Bigger values constantly takes more RAM - // TODO(@Wondertan): Make configurable with more conservative defaults for Light Node - opts.MaxTableSize = 64 << 20 - - f.data, err = dsbadger.NewDatastore(dataPath(f.path), &opts) + ds, err := dsbadger.NewDatastore(dataPath(f.path), &opts) if err != nil { return nil, fmt.Errorf("node: can't open Badger Datastore: %w", err) } - return f.data, nil + f.data = ds + return ds, nil } func (f *fsStore) Close() (err error) { err = errors.Join(err, f.dirLock.Unlock()) + f.dataMu.Lock() if f.data != nil { err = errors.Join(err, f.data.Close()) } + f.dataMu.Unlock() return } type fsStore struct { path string - data datastore.Batching - keys keystore.Keystore - - lock sync.RWMutex // protects all the fields + dataMu sync.Mutex + data datastore.Batching + keys keystore.Keystore dirLock *fslock.Locker // protects directory } diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 512d45bb70..8a39849060 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -1,13 +1,25 @@ package nodebuilder import ( + "context" "strconv" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestRepo(t *testing.T) { @@ -50,3 +62,122 @@ func TestRepo(t *testing.T) { }) } } + +func BenchmarkStore(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + b.Cleanup(cancel) + + // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) + b.Run("bench put 128", func(b *testing.B) { + dir := b.TempDir() + err := Init(*DefaultConfig(node.Full), dir, node.Full) + require.NoError(b, err) + + store := newStore(ctx, b, dir) + size := 128 + b.Run("enabled eds proof caching", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + adder := ipld.NewProofsAdder(size * 2) + shares := sharetest.RandShares(b, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(size), + nmt.NodeVisitor(adder.VisitFn())), + ) + require.NoError(b, err) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + ctx := ipld.CtxWithProofsAdder(ctx, adder) + + b.StartTimer() + err = store.edsStore.Put(ctx, dah.Hash(), eds) + b.StopTimer() + require.NoError(b, err) + } + }) + + b.Run("disabled eds proof caching", func(b *testing.B) { + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + eds := edstest.RandEDS(b, size) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + + b.StartTimer() + err = store.edsStore.Put(ctx, dah.Hash(), eds) + b.StopTimer() + require.NoError(b, err) + } + }) + }) +} + +func TestStoreRestart(t *testing.T) { + const ( + blocks = 5 + size = 32 + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + dir := t.TempDir() + err := Init(*DefaultConfig(node.Full), dir, node.Full) + require.NoError(t, err) + + store := newStore(ctx, 
t, dir) + + hashes := make([][]byte, blocks) + for i := range hashes { + edss := edstest.RandEDS(t, size) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(edss) + require.NoError(t, err) + err = store.edsStore.Put(ctx, dah.Hash(), edss) + require.NoError(t, err) + + // store hashes for read loop later + hashes[i] = dah.Hash() + } + + // restart store + store.stop(ctx, t) + store = newStore(ctx, t, dir) + + for _, h := range hashes { + edsReader, err := store.edsStore.GetCAR(ctx, h) + require.NoError(t, err) + odsReader, err := eds.ODSReader(edsReader) + require.NoError(t, err) + _, err = eds.ReadEDS(ctx, odsReader, h) + require.NoError(t, err) + } +} + +type store struct { + s Store + edsStore *eds.Store +} + +func newStore(ctx context.Context, t require.TestingT, dir string) store { + s, err := OpenStore(dir, nil) + require.NoError(t, err) + ds, err := s.Datastore() + require.NoError(t, err) + edsStore, err := eds.NewStore(dir, ds) + require.NoError(t, err) + err = edsStore.Start(ctx) + require.NoError(t, err) + return store{ + s: s, + edsStore: edsStore, + } +} + +func (s *store) stop(ctx context.Context, t *testing.T) { + require.NoError(t, s.edsStore.Stop(ctx)) + require.NoError(t, s.s.Close()) +} diff --git a/nodebuilder/testing.go b/nodebuilder/testing.go index 6cb40a2b6c..36f2c2f47f 100644 --- a/nodebuilder/testing.go +++ b/nodebuilder/testing.go @@ -11,9 +11,10 @@ import ( apptypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/celestia-node/core" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/libs/fxutil" - "github.com/celestiaorg/celestia-node/nodebuilder/header" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/state" @@ -47,7 +48,7 @@ func TestNodeWithConfig(t *testing.T, tp node.Type, cfg *Config, opts ...fx.Opti // temp dir for the eds store FIXME: Should be in mem fx.Replace(node.StorePath(t.TempDir())), // avoid requesting trustedPeer during initialization - fxutil.ReplaceAs(headertest.NewStore(t), new(header.InitStore)), + fxutil.ReplaceAs(headertest.NewStore(t), new(modhead.InitStore[*header.ExtendedHeader])), ) // in fact, we don't need core.Client in tests, but Bridge requires is a valid one diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go index 13cf083eee..3a66c4e58c 100644 --- a/nodebuilder/tests/api_test.go +++ b/nodebuilder/tests/api_test.go @@ -13,29 +13,64 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/client" "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/blob/blobtest" - "github.com/celestiaorg/celestia-node/libs/authtoken" "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" ) -func TestGetByHeight(t *testing.T) { +func TestNodeModule(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) - // start a bridge node bridge := sw.NewBridgeNode() err := bridge.Start(ctx) require.NoError(t, err) - signer := bridge.AdminSigner bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() - jwt, err := authtoken.NewSignedJWT(signer, 
[]auth.Permission{"public", "read", "write", "admin"}) + writePerms := []auth.Permission{"public", "read", "write"} + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) + require.NoError(t, err) + + client, err := client.NewClient(ctx, bridgeAddr, jwt) + require.NoError(t, err) + + info, err := client.Node.Info(ctx) + require.NoError(t, err) + require.Equal(t, info.APIVersion, node.APIVersion) + + perms, err := client.Node.AuthVerify(ctx, jwt) + require.NoError(t, err) + require.Equal(t, perms, adminPerms) + + writeJWT, err := client.Node.AuthNew(ctx, writePerms) + require.NoError(t, err) + + perms, err = client.Node.AuthVerify(ctx, writeJWT) + require.NoError(t, err) + require.Equal(t, perms, writePerms) + +} + +func TestGetByHeight(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) require.NoError(t, err) + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() client, err := client.NewClient(ctx, bridgeAddr, jwt) require.NoError(t, err) @@ -45,12 +80,12 @@ func TestGetByHeight(t *testing.T) { networkHead, err := client.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+1)) + _, err = client.Header.GetByHeight(ctx, networkHead.Height()+1) require.Nil(t, err, "Requesting syncer.Head()+1 shouldn't return an error") networkHead, err = client.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+2)) + _, err = client.Header.GetByHeight(ctx, networkHead.Height()+2) require.ErrorContains(t, err, "given height is from the future") } @@ -66,16 +101,15 @@ func TestBlobRPC(t *testing.T) { err := bridge.Start(ctx) require.NoError(t, err) - signer := bridge.AdminSigner - bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() - - jwt, err := authtoken.NewSignedJWT(signer, []auth.Permission{"public", "read", "write", "admin"}) + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) require.NoError(t, err) + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() client, err := client.NewClient(ctx, bridgeAddr, jwt) require.NoError(t, err) - appBlobs, err := blobtest.GenerateBlobs([]int{8}, false) + appBlobs, err := blobtest.GenerateV0Blobs([]int{8}, false) require.NoError(t, err) newBlob, err := blob.NewBlob( diff --git a/nodebuilder/tests/blob_test.go b/nodebuilder/tests/blob_test.go index 537e320c5a..a9e3a0464b 100644 --- a/nodebuilder/tests/blob_test.go +++ b/nodebuilder/tests/blob_test.go @@ -11,22 +11,21 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/blob/blobtest" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share" ) func TestBlobModule(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 
25*time.Second) t.Cleanup(cancel) - sw := swamp.NewSwamp(t) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second*1)) - appBlobs0, err := blobtest.GenerateBlobs([]int{8, 4}, true) + appBlobs0, err := blobtest.GenerateV0Blobs([]int{8, 4}, true) require.NoError(t, err) - appBlobs1, err := blobtest.GenerateBlobs([]int{4}, false) + appBlobs1, err := blobtest.GenerateV0Blobs([]int{4}, false) require.NoError(t, err) blobs := make([]*blob.Blob, 0, len(appBlobs0)+len(appBlobs1)) @@ -79,7 +78,7 @@ func TestBlobModule(t *testing.T) { { name: "GetAll", doFn: func(t *testing.T) { - newBlobs, err := fullNode.BlobServ.GetAll(ctx, height, []namespace.ID{blobs[0].Namespace()}) + newBlobs, err := fullNode.BlobServ.GetAll(ctx, height, []share.Namespace{blobs[0].Namespace()}) require.NoError(t, err) require.Len(t, newBlobs, len(appBlobs0)) require.True(t, bytes.Equal(blobs[0].Commitment, newBlobs[0].Commitment)) @@ -106,7 +105,7 @@ func TestBlobModule(t *testing.T) { { name: "Not Found", doFn: func(t *testing.T) { - appBlob, err := blobtest.GenerateBlobs([]int{4}, false) + appBlob, err := blobtest.GenerateV0Blobs([]int{4}, false) require.NoError(t, err) newBlob, err := blob.NewBlob( appBlob[0].ShareVersion, diff --git a/nodebuilder/tests/nd_test.go b/nodebuilder/tests/nd_test.go new file mode 100644 index 0000000000..cb02e2a178 --- /dev/null +++ b/nodebuilder/tests/nd_test.go @@ -0,0 +1,203 @@ +package tests + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/stretchr/testify/require" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" +) + +func TestShrexNDFromLights(t *testing.T) { + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 16 + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + + cfg := nodebuilder.DefaultConfig(node.Light) + cfg.Share.Discovery.PeersLimit = 1 + light := sw.NewNodeWithConfig(node.Light, cfg) + + err := bridge.Start(ctx) + require.NoError(t, err) + err = light.Start(ctx) + require.NoError(t, err) + + // wait for chain to be filled + require.NoError(t, <-fillDn) + + // first 15 blocks are not filled with data + // + // TODO: we need to stop guessing + // the block that actually has transactions. We can get this data from the + // response returned by FillBlock. 
+ for i := 16; i < blocks; i++ { + h, err := bridge.HeaderServ.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + + reqCtx, cancel := context.WithTimeout(ctx, time.Second*5) + + // ensure to fetch random namespace (not the reserved namespace) + namespace := h.DAH.RowRoots[1][:share.NamespaceSize] + + expected, err := bridge.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + require.NoError(t, err) + got, err := light.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + require.NoError(t, err) + + require.True(t, len(got[0].Shares) > 0) + require.Equal(t, expected, got) + + cancel() + } +} + +func TestShrexNDFromLightsWithBadFulls(t *testing.T) { + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 16 + amountOfFulls = 5 + testTimeout = time.Second * 10 + ) + + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + + // create full nodes with basic stream.reset handler + ndHandler := func(stream network.Stream) { + _ = stream.Reset() + } + fulls := make([]*nodebuilder.Node, 0, amountOfFulls) + for i := 0; i < amountOfFulls; i++ { + cfg := nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, testTimeout) + full := sw.NewNodeWithConfig(node.Full, cfg, replaceNDServer(cfg, ndHandler), replaceShareGetter()) + fulls = append(fulls, full) + } + + lnConfig := nodebuilder.DefaultConfig(node.Light) + lnConfig.Share.Discovery.PeersLimit = uint(amountOfFulls) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + + // start all nodes + require.NoError(t, bridge.Start(ctx)) + require.NoError(t, startFullNodes(ctx, fulls...)) + require.NoError(t, light.Start(ctx)) + + // wait for chain to fill up + require.NoError(t, <-fillDn) + + // first 2 blocks are not filled with data + for i := 3; i < blocks; i++ { + h, err := bridge.HeaderServ.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + + if len(h.DAH.RowRoots) != bsize*2 { + // fill blocks does not always fill every block to the given block + // size - this check prevents trying to fetch shares for the parity + // namespace. 
+ continue + } + + reqCtx, cancel := context.WithTimeout(ctx, time.Second*5) + + // ensure to fetch random namespace (not the reserved namespace) + namespace := h.DAH.RowRoots[1][:share.NamespaceSize] + + expected, err := bridge.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + require.NoError(t, err) + require.True(t, len(expected[0].Shares) > 0) + + // choose a random full to test + gotFull, err := fulls[len(fulls)/2].ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + require.NoError(t, err) + require.True(t, len(gotFull[0].Shares) > 0) + + gotLight, err := light.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + require.NoError(t, err) + require.True(t, len(gotLight[0].Shares) > 0) + + require.Equal(t, expected, gotFull) + require.Equal(t, expected, gotLight) + + cancel() + } +} + +func startFullNodes(ctx context.Context, fulls ...*nodebuilder.Node) error { + for _, full := range fulls { + err := full.Start(ctx) + if err != nil { + return err + } + } + return nil +} + +func replaceNDServer(cfg *nodebuilder.Config, handler network.StreamHandler) fx.Option { + return fx.Decorate(fx.Annotate( + func( + host host.Host, + store *eds.Store, + getter *getters.StoreGetter, + network p2p.Network, + ) (*shrexnd.Server, error) { + cfg.Share.ShrExNDParams.WithNetworkID(network.String()) + return shrexnd.NewServer(cfg.Share.ShrExNDParams, host, store, getter) + }, + fx.OnStart(func(ctx context.Context, server *shrexnd.Server) error { + // replace handler for server + server.SetHandler(handler) + return server.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, server *shrexnd.Server) error { + return server.Start(ctx) + }), + )) +} + +func replaceShareGetter() fx.Option { + return fx.Decorate(fx.Annotate( + func( + host host.Host, + store *eds.Store, + storeGetter *getters.StoreGetter, + shrexGetter *getters.ShrexGetter, + network p2p.Network, + ) share.Getter { + cascade := make([]share.Getter, 0, 2) + cascade = append(cascade, storeGetter) + cascade = append(cascade, getters.NewTeeGetter(shrexGetter, store)) + return getters.NewCascadeGetter(cascade) + }, + )) +} diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go index a4e3296c2f..d8640c5249 100644 --- a/nodebuilder/tests/reconstruct_test.go +++ b/nodebuilder/tests/reconstruct_test.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -53,6 +54,11 @@ func TestFullReconstructFromBridge(t *testing.T) { err := bridge.Start(ctx) require.NoError(t, err) + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err = bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + cfg := nodebuilder.DefaultConfig(node.Full) cfg.Share.UseShareExchange = false cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, getMultiAddr(t, bridge.Host)) @@ -76,6 +82,156 @@ func TestFullReconstructFromBridge(t *testing.T) { require.NoError(t, errg.Wait()) } +/* +Test-Case: Full Node reconstructs blocks from each other, after unsuccessfully syncing the complete +block from LN subnetworks. Analog to TestShareAvailable_DisconnectedFullNodes. 
+*/ +func TestFullReconstructFromFulls(t *testing.T) { + light.DefaultSampleAmount = 10 // s + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 8 // k + lnodes = 12 // c - total number of nodes on two subnetworks + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + const defaultTimeInterval = time.Second * 5 + bridge := sw.NewBridgeNode() + + sw.SetBootstrapper(t, bridge) + require.NoError(t, bridge.Start(ctx)) + + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err := bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + + lights1 := make([]*nodebuilder.Node, lnodes/2) + lights2 := make([]*nodebuilder.Node, lnodes/2) + subs := make([]event.Subscription, lnodes) + errg, errCtx := errgroup.WithContext(ctx) + for i := 0; i < lnodes/2; i++ { + i := i + errg.Go(func() error { + lnConfig := nodebuilder.DefaultConfig(node.Light) + setTimeInterval(lnConfig, defaultTimeInterval) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + if err != nil { + return err + } + subs[i] = sub + lights1[i] = light + return light.Start(errCtx) + }) + errg.Go(func() error { + lnConfig := nodebuilder.DefaultConfig(node.Light) + setTimeInterval(lnConfig, defaultTimeInterval) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + if err != nil { + return err + } + subs[(lnodes/2)+i] = sub + lights2[i] = light + return light.Start(errCtx) + }) + } + + require.NoError(t, errg.Wait()) + + for i := 0; i < lnodes; i++ { + select { + case <-ctx.Done(): + t.Fatal("peer was not found") + case <-subs[i].Out(): + require.NoError(t, subs[i].Close()) + continue + } + } + + // Remove bootstrappers to prevent FNs from connecting to bridge + sw.Bootstrappers = []ma.Multiaddr{} + // Use light nodes from respective subnetworks as bootstrappers to prevent connection to bridge + lnBootstrapper1, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights1[0].Host)) + require.NoError(t, err) + lnBootstrapper2, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights2[0].Host)) + require.NoError(t, err) + + cfg := nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, defaultTimeInterval) + cfg.Share.UseShareExchange = false + cfg.Share.Discovery.PeersLimit = 0 + cfg.Header.TrustedPeers = []string{lnBootstrapper1[0].String()} + full1 := sw.NewNodeWithConfig(node.Full, cfg) + cfg.Header.TrustedPeers = []string{lnBootstrapper2[0].String()} + full2 := sw.NewNodeWithConfig(node.Full, cfg) + require.NoError(t, full1.Start(ctx)) + require.NoError(t, full2.Start(ctx)) + + // Form topology + for i := 0; i < lnodes/2; i++ { + // Separate light nodes into two subnetworks + for j := 0; j < lnodes/2; j++ { + sw.Disconnect(t, lights1[i], lights2[j]) + if i != j { + sw.Connect(t, lights1[i], lights1[j]) + sw.Connect(t, lights2[i], lights2[j]) + } + } + + sw.Connect(t, full1, lights1[i]) + sw.Disconnect(t, full1, lights2[i]) + + sw.Connect(t, full2, lights2[i]) + sw.Disconnect(t, full2, lights1[i]) + } + + // Ensure the fulls are not connected to the bridge + sw.Disconnect(t, full1, full2) + sw.Disconnect(t, full1, bridge) + sw.Disconnect(t, full2, bridge) + + h, err 
:= full1.HeaderServ.WaitForHeight(ctx, uint64(10+blocks-1)) + require.NoError(t, err) + + // Ensure that the full nodes cannot reconstruct before being connected to each other + ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*30) + errg, errCtx = errgroup.WithContext(ctxErr) + errg.Go(func() error { + return full1.ShareServ.SharesAvailable(errCtx, h.DAH) + }) + errg.Go(func() error { + return full2.ShareServ.SharesAvailable(errCtx, h.DAH) + }) + require.Error(t, errg.Wait()) + cancelErr() + + // Reconnect FNs + sw.Connect(t, full1, full2) + + errg, bctx := errgroup.WithContext(ctx) + for i := 10; i < blocks+11; i++ { + h, err := full1.HeaderServ.WaitForHeight(bctx, uint64(i)) + require.NoError(t, err) + errg.Go(func() error { + return full1.ShareServ.SharesAvailable(bctx, h.DAH) + }) + errg.Go(func() error { + return full2.ShareServ.SharesAvailable(bctx, h.DAH) + }) + } + + require.NoError(t, <-fillDn) + require.NoError(t, errg.Wait()) +} + /* Test-Case: Full Node reconstructs blocks only from Light Nodes Pre-Reqs: @@ -125,6 +281,11 @@ func TestFullReconstructFromLights(t *testing.T) { require.NoError(t, bridge.Start(ctx)) bootstrapperAddr := host.InfoFromHost(bootstrapper.Host) + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err = bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + cfg = nodebuilder.DefaultConfig(node.Full) setTimeInterval(cfg, defaultTimeInterval) cfg.Share.UseShareExchange = false diff --git a/nodebuilder/tests/swamp/config.go b/nodebuilder/tests/swamp/config.go index 630920609f..047baa9f59 100644 --- a/nodebuilder/tests/swamp/config.go +++ b/nodebuilder/tests/swamp/config.go @@ -3,35 +3,30 @@ package swamp import ( "time" + "github.com/celestiaorg/celestia-app/test/util/testnode" + "github.com/celestiaorg/celestia-node/core" ) -// Config struct represents a set of pre-requisite attributes from the test scenario -type Config struct { - *core.TestConfig -} - // DefaultConfig creates a celestia-app instance with a block time of around // 100ms -func DefaultConfig() *Config { +func DefaultConfig() *testnode.Config { cfg := core.DefaultTestConfig() - // target height duration lower than this tend to be flakier - cfg.Tendermint.Consensus.TargetHeightDuration = 200 * time.Millisecond - return &Config{ - cfg, - } + // timeout commit lower than this tend to be flakier + cfg.TmConfig.Consensus.TimeoutCommit = 200 * time.Millisecond + return cfg } // Option for modifying Swamp's Config. -type Option func(*Config) +type Option func(*testnode.Config) // WithBlockTime sets a custom interval for block creation. 
func WithBlockTime(t time.Duration) Option { - return func(c *Config) { + return func(c *testnode.Config) { // for empty block - c.Tendermint.Consensus.CreateEmptyBlocksInterval = t + c.TmConfig.Consensus.CreateEmptyBlocksInterval = t // for filled block - c.Tendermint.Consensus.TargetHeightDuration = t - c.Tendermint.Consensus.SkipTimeoutCommit = false + c.TmConfig.Consensus.TimeoutCommit = t + c.TmConfig.Consensus.SkipTimeoutCommit = false } } diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index eabe33dc9c..58584912be 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -48,7 +48,7 @@ const DefaultTestTimeout = time.Minute * 5 // - trustedHash taken from the CoreClient and shared between nodes type Swamp struct { t *testing.T - cfg *Config + cfg *testnode.Config Network mocknet.Mocknet Bootstrappers []ma.Multiaddr @@ -76,7 +76,7 @@ func NewSwamp(t *testing.T, options ...Option) *Swamp { // Now, we are making an assumption that consensus mechanism is already tested out // so, we are not creating bridge nodes with each one containing its own core client // instead we are assigning all created BNs to 1 Core from the swamp - cctx := core.StartTestNodeWithConfig(t, ic.TestConfig) + cctx := core.StartTestNodeWithConfig(t, ic) swp := &Swamp{ t: t, cfg: ic, @@ -164,7 +164,7 @@ func (s *Swamp) createPeer(ks keystore.Keystore) host.Host { // setupGenesis sets up genesis Header. // This is required to initialize and start correctly. func (s *Swamp) setupGenesis() { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() // ensure core has surpassed genesis block @@ -188,7 +188,7 @@ func (s *Swamp) setupGenesis() { func (s *Swamp) DefaultTestConfig(tp node.Type) *nodebuilder.Config { cfg := nodebuilder.DefaultConfig(tp) - ip, port, err := net.SplitHostPort(s.cfg.App.GRPC.Address) + ip, port, err := net.SplitHostPort(s.cfg.AppConfig.GRPC.Address) require.NoError(s.t, err) cfg.Core.IP = ip diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index 1d51e2abf9..65db1332ff 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -295,7 +295,7 @@ func TestSyncLightAgainstFull(t *testing.T) { require.NoError(t, err) bridgeHead, err := bridge.HeaderServ.LocalHead(ctx) require.NoError(t, err) - _, err = full.HeaderServ.WaitForHeight(ctx, uint64(bridgeHead.Height())) + _, err = full.HeaderServ.WaitForHeight(ctx, bridgeHead.Height()) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -317,7 +317,7 @@ func TestSyncLightAgainstFull(t *testing.T) { require.NoError(t, err) fullHead, err := full.HeaderServ.LocalHead(ctx) require.NoError(t, err) - _, err = light.HeaderServ.WaitForHeight(ctx, uint64(fullHead.Height())) + _, err = light.HeaderServ.WaitForHeight(ctx, fullHead.Height()) require.NoError(t, err) // wait for the core block filling process to exit diff --git a/share/availability.go b/share/availability.go index 9538573114..b6b44271c8 100644 --- a/share/availability.go +++ b/share/availability.go @@ -4,7 +4,7 @@ import ( "context" "errors" - da "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/da" ) // ErrNotAvailable is returned whenever DA sampling fails. 
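The swamp config hunk above rewires block timing through functional options over testnode.Config. The following self-contained sketch shows that option pattern with hypothetical stand-in names (testConfig, defaultConfig); the real option mutates cfg.TmConfig.Consensus.TimeoutCommit and CreateEmptyBlocksInterval exactly as shown in the diff.

package main

import (
	"fmt"
	"time"
)

// testConfig stands in for testnode.Config; only the two timing knobs
// touched by WithBlockTime are modeled here.
type testConfig struct {
	TimeoutCommit             time.Duration
	CreateEmptyBlocksInterval time.Duration
}

// Option mirrors swamp's Option type: a function that mutates the config.
type Option func(*testConfig)

// WithBlockTime sets both the empty-block interval and the commit timeout,
// following the behavior of the option in the hunk above.
func WithBlockTime(t time.Duration) Option {
	return func(c *testConfig) {
		c.CreateEmptyBlocksInterval = t // for empty blocks
		c.TimeoutCommit = t             // for filled blocks
	}
}

// defaultConfig builds a baseline config and applies any options on top,
// the same shape in which swamp consumes its options.
func defaultConfig(opts ...Option) *testConfig {
	cfg := &testConfig{TimeoutCommit: 200 * time.Millisecond}
	for _, opt := range opts {
		opt(cfg)
	}
	return cfg
}

func main() {
	cfg := defaultConfig(WithBlockTime(300 * time.Millisecond))
	fmt.Println("timeout commit:", cfg.TimeoutCommit,
		"empty block interval:", cfg.CreateEmptyBlocksInterval)
}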
diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index cfa5bd0c39..fb8dead839 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -4,7 +4,6 @@ import ( "context" "errors" - ipldFormat "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/share" @@ -72,13 +71,14 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, root *share.Ro _, err := fa.getter.GetEDS(ctx, root) if err != nil { + if errors.Is(err, context.Canceled) { + return err + } log.Errorw("availability validation failed", "root", root.String(), "err", err.Error()) var byzantineErr *byzantine.ErrByzantine - if ipldFormat.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { + if errors.Is(err, share.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { return share.ErrNotAvailable } - - return err } return err } diff --git a/share/availability/full/availability_test.go b/share/availability/full/availability_test.go index 48ae0534aa..b6d217cd6b 100644 --- a/share/availability/full/availability_test.go +++ b/share/availability/full/availability_test.go @@ -4,9 +4,16 @@ import ( "context" "testing" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-app/pkg/da" + + "github.com/celestiaorg/celestia-node/share" availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/mocks" ) func TestShareAvailableOverMocknet_Full(t *testing.T) { @@ -32,3 +39,22 @@ func TestSharesAvailable_Full(t *testing.T) { err := avail.SharesAvailable(ctx, dah) assert.NoError(t, err) } + +func TestSharesAvailable_Full_ErrNotAvailable(t *testing.T) { + ctrl := gomock.NewController(t) + getter := mocks.NewMockGetter(ctrl) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + avail := TestAvailability(getter) + + errors := []error{share.ErrNotFound, context.DeadlineExceeded} + for _, getterErr := range errors { + getter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).Return(nil, getterErr) + err := avail.SharesAvailable(ctx, &dah) + require.ErrorIs(t, err, share.ErrNotAvailable) + } +} diff --git a/share/availability/full/reconstruction_test.go b/share/availability/full/reconstruction_test.go index 03b998d49e..f3b6ce91bd 100644 --- a/share/availability/full/reconstruction_test.go +++ b/share/availability/full/reconstruction_test.go @@ -181,35 +181,15 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { light.DefaultSampleAmount = 20 // s const ( origSquareSize = 16 // k - lightNodes = 60 // c - total number of nodes on two subnetworks + lightNodes = 32 // c - total number of nodes on two subnetworks ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() net := availability_test.NewTestDAGNet(ctx, t) source, root := RandNode(net, origSquareSize) - // create two full nodes and ensure they are disconnected - full1 := Node(net) - full2 := Node(net) - net.Disconnect(full1.ID(), full2.ID()) - - // ensure fulls and source are not connected - // so that fulls take data 
from light nodes only - net.Disconnect(full1.ID(), source.ID()) - net.Disconnect(full2.ID(), source.ID()) - - // start reconstruction for fulls that should fail - ctxErr, cancelErr := context.WithTimeout(ctx, eds.RetrieveQuadrantTimeout*8) - errg, errCtx := errgroup.WithContext(ctxErr) - errg.Go(func() error { - return full1.SharesAvailable(errCtx, root) - }) - errg.Go(func() error { - return full2.SharesAvailable(errCtx, root) - }) - // create light nodes and start sampling for them immediately lights1, lights2 := make( []*availability_test.TestNode, lightNodes/2), @@ -237,6 +217,16 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { }(i) } + // create two full nodes and ensure they are disconnected + full1 := Node(net) + full2 := Node(net) + net.Disconnect(full1.ID(), full2.ID()) + + // ensure fulls and source are not connected + // so that fulls take data from light nodes only + net.Disconnect(full1.ID(), source.ID()) + net.Disconnect(full2.ID(), source.ID()) + // shape topology for i := 0; i < len(lights1); i++ { // ensure lights1 are only connected to source and full1 @@ -249,6 +239,16 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { net.Disconnect(lights2[i].ID(), full1.ID()) } + // start reconstruction for fulls that should fail + ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*5) + errg, errCtx := errgroup.WithContext(ctxErr) + errg.Go(func() error { + return full1.SharesAvailable(errCtx, root) + }) + errg.Go(func() error { + return full2.SharesAvailable(errCtx, root) + }) + // check that any of the fulls cannot reconstruct on their own err := errg.Wait() require.ErrorIs(t, err, share.ErrNotAvailable) @@ -262,10 +262,14 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { full2.ClearStorage() // they both should be able to reconstruct the block - err = full1.SharesAvailable(ctx, root) - require.NoError(t, err, share.ErrNotAvailable) - err = full2.SharesAvailable(ctx, root) - require.NoError(t, err, share.ErrNotAvailable) + errg, bctx := errgroup.WithContext(ctx) + errg.Go(func() error { + return full1.SharesAvailable(bctx, root) + }) + errg.Go(func() error { + return full2.SharesAvailable(bctx, root) + }) + require.NoError(t, errg.Wait()) // wait for all routines to finish before exit, in case there are any errors to log wg.Wait() } diff --git a/share/availability/light/availability.go b/share/availability/light/availability.go index 761671b955..cc2e08129e 100644 --- a/share/availability/light/availability.go +++ b/share/availability/light/availability.go @@ -84,13 +84,13 @@ func (la *ShareAvailability) SharesAvailable(ctx context.Context, dah *share.Roo } if err != nil { - if !errors.Is(err, context.Canceled) { - log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) + if errors.Is(err, context.Canceled) { + return err } + log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) if ipldFormat.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) { return share.ErrNotAvailable } - return err } } diff --git a/share/availability/light/availability_test.go b/share/availability/light/availability_test.go index 4980af031e..48813a33f9 100644 --- a/share/availability/light/availability_test.go +++ b/share/availability/light/availability_test.go @@ -10,11 +10,11 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/celestia-node/header" 
"github.com/celestiaorg/celestia-node/share" availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestSharesAvailable(t *testing.T) { @@ -81,23 +81,23 @@ func TestService_GetSharesByNamespace(t *testing.T) { t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { getter, bServ := EmptyGetter() totalShares := tt.squareSize * tt.squareSize - randShares := share.RandShares(t, totalShares) + randShares := sharetest.RandShares(t, totalShares) idx1 := (totalShares - 1) / 2 idx2 := totalShares / 2 if tt.expectedShareCount > 1 { - // make it so that two rows have the same namespace ID - copy(randShares[idx2][:namespace.NamespaceSize], randShares[idx1][:namespace.NamespaceSize]) + // make it so that two rows have the same namespace + copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) } root := availability_test.FillBS(t, bServ, randShares) - randNID := randShares[idx1][:namespace.NamespaceSize] + randNamespace := share.GetNamespace(randShares[idx1]) - shares, err := getter.GetSharesByNamespace(context.Background(), root, randNID) + shares, err := getter.GetSharesByNamespace(context.Background(), root, randNamespace) require.NoError(t, err) - require.NoError(t, shares.Verify(root, randNID)) + require.NoError(t, shares.Verify(root, randNamespace)) flattened := shares.Flatten() assert.Len(t, flattened, tt.expectedShareCount) for _, value := range flattened { - assert.Equal(t, randNID, []byte(share.ID(value))) + assert.Equal(t, randNamespace, share.GetNamespace(value)) } if tt.expectedShareCount > 1 { // idx1 is always smaller than idx2 @@ -105,14 +105,14 @@ func TestService_GetSharesByNamespace(t *testing.T) { assert.Equal(t, randShares[idx2], flattened[1]) } }) - t.Run("last two rows of a 4x4 square that have the same namespace ID have valid NMT proofs", func(t *testing.T) { + t.Run("last two rows of a 4x4 square that have the same namespace have valid NMT proofs", func(t *testing.T) { squareSize := 4 totalShares := squareSize * squareSize getter, bServ := EmptyGetter() - randShares := share.RandShares(t, totalShares) - lastNID := randShares[totalShares-1][:namespace.NamespaceSize] + randShares := sharetest.RandShares(t, totalShares) + lastNID := share.GetNamespace(randShares[totalShares-1]) for i := totalShares / 2; i < totalShares; i++ { - copy(randShares[i][:namespace.NamespaceSize], lastNID) + copy(share.GetNamespace(randShares[i]), lastNID) } root := availability_test.FillBS(t, bServ, randShares) @@ -132,7 +132,8 @@ func TestGetShares(t *testing.T) { eds, err := getter.GetEDS(ctx, dah) require.NoError(t, err) - gotDAH := da.NewDataAvailabilityHeader(eds) + gotDAH, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) require.True(t, dah.Equals(&gotDAH)) } @@ -141,8 +142,9 @@ func TestService_GetSharesByNamespaceNotFound(t *testing.T) { getter, root := GetterWithRandSquare(t, 1) root.RowRoots = nil - _, err := getter.GetSharesByNamespace(context.Background(), root, namespace.RandomNamespace().Bytes()) - assert.ErrorIs(t, err, share.ErrNamespaceNotFound) + emptyShares, err := getter.GetSharesByNamespace(context.Background(), root, sharetest.RandV0Namespace()) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) } func BenchmarkService_GetSharesByNamespace(b *testing.B) { @@ -158,11 +160,11 @@ func BenchmarkService_GetSharesByNamespace(b *testing.B) { b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { t := &testing.T{} getter, root := 
GetterWithRandSquare(t, tt.amountShares) - randNID := root.RowRoots[(len(root.RowRoots)-1)/2][:8] + randNamespace := root.RowRoots[(len(root.RowRoots)-1)/2][:share.NamespaceSize] root.RowRoots[(len(root.RowRoots) / 2)] = root.RowRoots[(len(root.RowRoots)-1)/2] b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := getter.GetSharesByNamespace(context.Background(), root, randNID) + _, err := getter.GetSharesByNamespace(context.Background(), root, randNamespace) require.NoError(t, err) } }) diff --git a/share/availability/test/corrupt_data.go b/share/availability/test/corrupt_data.go index 1bb8bd243a..f0bd8fbbc5 100644 --- a/share/availability/test/corrupt_data.go +++ b/share/availability/test/corrupt_data.go @@ -7,11 +7,11 @@ import ( mrand "math/rand" "testing" + "github.com/ipfs/boxo/blockstore" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - blocks "github.com/ipfs/go-libipfs/blocks" ) var _ blockstore.Blockstore = (*FraudulentBlockstore)(nil) diff --git a/share/availability/test/testing.go b/share/availability/test/testing.go index b2fb780bd4..8977678198 100644 --- a/share/availability/test/testing.go +++ b/share/availability/test/testing.go @@ -4,13 +4,13 @@ import ( "context" "testing" + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/routing/offline" "github.com/ipfs/go-blockservice" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipfs/go-ipfs-routing/offline" - "github.com/ipfs/go-libipfs/bitswap" - "github.com/ipfs/go-libipfs/bitswap/network" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" @@ -20,19 +20,22 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) // RandFillBS fills the given BlockService with a random block of a given size. func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.Root { - shares := share.RandShares(t, n*n) + shares := sharetest.RandShares(t, n*n) return FillBS(t, bServ, shares) } // FillBS fills the given BlockService with the given shares. func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.Root { - eds, err := share.AddShares(context.TODO(), shares, bServ) + eds, err := ipld.AddShares(context.TODO(), shares, bServ) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(eds) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(eds) return &dah } diff --git a/share/doc.go b/share/doc.go index 7ef8c9f7bf..97229932a7 100644 --- a/share/doc.go +++ b/share/doc.go @@ -4,7 +4,7 @@ block data. Though this package contains several useful methods for getting specific shares and/or sampling them at random, a particularly useful method is GetSharesByNamespace which retrieves -all shares of block data of the given namespace.ID from the block associated with the given +all shares of block data of the given Namespace from the block associated with the given DataAvailabilityHeader (DAH, but referred to as Root within this package). This package also contains declaration of the Availability interface. 
Implementations of diff --git a/share/eds/accessor_cache.go b/share/eds/accessor_cache.go index eac79b946f..cd0f0537fa 100644 --- a/share/eds/accessor_cache.go +++ b/share/eds/accessor_cache.go @@ -1,6 +1,7 @@ package eds import ( + "context" "errors" "fmt" "reflect" @@ -9,6 +10,8 @@ import ( "github.com/filecoin-project/dagstore" "github.com/filecoin-project/dagstore/shard" lru "github.com/hashicorp/golang-lru" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" ) var ( @@ -32,11 +35,23 @@ type blockstoreCache struct { // caches the blockstore for a given shard for shard read affinity i.e. // further reads will likely be from the same shard. Maps (shard key -> blockstore). cache *lru.Cache + + metrics *cacheMetrics } func newBlockstoreCache(cacheSize int) (*blockstoreCache, error) { + bc := &blockstoreCache{} // instantiate the blockstore cache - bslru, err := lru.NewWithEvict(cacheSize, func(_ interface{}, val interface{}) { + bslru, err := lru.NewWithEvict(cacheSize, bc.evictFn()) + if err != nil { + return nil, fmt.Errorf("failed to instantiate blockstore cache: %w", err) + } + bc.cache = bslru + return bc, nil +} + +func (bc *blockstoreCache) evictFn() func(_ interface{}, val interface{}) { + return func(_ interface{}, val interface{}) { // ensure we close the blockstore for a shard when it's evicted so dagstore can gc it. abs, ok := val.(*accessorWithBlockstore) if !ok { @@ -46,14 +61,12 @@ func newBlockstoreCache(cacheSize int) (*blockstoreCache, error) { )) } - if err := abs.sa.Close(); err != nil { + err := abs.sa.Close() + if err != nil { log.Errorf("couldn't close accessor after cache eviction: %s", err) } - }) - if err != nil { - return nil, fmt.Errorf("failed to instantiate blockstore cache: %w", err) + bc.metrics.observeEvicted(err != nil) } - return &blockstoreCache{cache: bslru}, nil } // Get retrieves the blockstore for a given shard key from the cache. 
If the blockstore is not in @@ -117,3 +130,41 @@ func (bc *blockstoreCache) unsafeAdd( func shardKeyToStriped(sk shard.Key) byte { return sk.String()[len(sk.String())-1] } + +type cacheMetrics struct { + evictedCounter metric.Int64Counter +} + +func (bc *blockstoreCache) withMetrics() error { + evictedCounter, err := meter.Int64Counter("eds_blockstore_cache_evicted_counter", + metric.WithDescription("eds blockstore cache evicted event counter")) + if err != nil { + return err + } + + cacheSize, err := meter.Int64ObservableGauge("eds_blockstore_cache_size", + metric.WithDescription("total amount of items in blockstore cache"), + ) + if err != nil { + return err + } + + callback := func(ctx context.Context, observer metric.Observer) error { + observer.ObserveInt64(cacheSize, int64(bc.cache.Len())) + return nil + } + _, err = meter.RegisterCallback(callback, cacheSize) + if err != nil { + return err + } + bc.metrics = &cacheMetrics{evictedCounter: evictedCounter} + return nil +} + +func (m *cacheMetrics) observeEvicted(failed bool) { + if m == nil { + return + } + m.evictedCounter.Add(context.Background(), 1, metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} diff --git a/share/eds/adapters.go b/share/eds/adapters.go index 8d98f092d7..fe498400b3 100644 --- a/share/eds/adapters.go +++ b/share/eds/adapters.go @@ -5,9 +5,9 @@ import ( "sync" "github.com/filecoin-project/dagstore" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" - "github.com/ipfs/go-libipfs/blocks" ) var _ blockservice.BlockGetter = (*BlockGetter)(nil) diff --git a/share/eds/adapters_test.go b/share/eds/adapters_test.go index 6ccd29e99d..70165b81c8 100644 --- a/share/eds/adapters_test.go +++ b/share/eds/adapters_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipfs/go-libipfs/blocks" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/share/ipld" diff --git a/share/eds/blockstore.go b/share/eds/blockstore.go index 3c63fa3945..22ef28f821 100644 --- a/share/eds/blockstore.go +++ b/share/eds/blockstore.go @@ -6,16 +6,19 @@ import ( "fmt" "github.com/filecoin-project/dagstore" + bstore "github.com/ipfs/boxo/blockstore" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipfs/go-datastore/namespace" + dshelp "github.com/ipfs/go-ipfs-ds-help" ipld "github.com/ipfs/go-ipld-format" - blocks "github.com/ipfs/go-libipfs/blocks" ) var _ bstore.Blockstore = (*blockstore)(nil) var ( + blockstoreCacheKey = datastore.NewKey("bs-cache") errUnsupportedOperation = errors.New("unsupported operation") ) @@ -30,29 +33,42 @@ var ( type blockstore struct { store *Store cache *blockstoreCache + ds datastore.Batching } -func newBlockstore(store *Store, cache *blockstoreCache) *blockstore { +func newBlockstore(store *Store, cache *blockstoreCache, ds datastore.Batching) *blockstore { return &blockstore{ store: store, cache: cache, + ds: namespace.Wrap(ds, blockstoreCacheKey), } } func (bs *blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, ErrNotFound) { - return false, nil + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + // key wasn't found in top level blockstore, but could be in datastore while being reconstructed + dsHas, dsErr := bs.ds.Has(ctx, 
dshelp.MultihashToDsKey(cid.Hash())) + if dsErr != nil { + return false, nil + } + return dsHas, nil } if err != nil { - return false, fmt.Errorf("failed to find shards containing multihash: %w", err) + return false, err } + return len(keys) > 0, nil } func (bs *blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) - if errors.Is(err, ErrNotFound) { + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + k := dshelp.MultihashToDsKey(cid.Hash()) + blockData, err := bs.ds.Get(ctx, k) + if err == nil { + return blocks.NewBlockWithCid(blockData, cid) + } // nmt's GetNode expects an ipld.ErrNotFound when a cid is not found. return nil, ipld.ErrNotFound{Cid: cid} } @@ -65,7 +81,12 @@ func (bs *blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error func (bs *blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) - if errors.Is(err, ErrNotFound) { + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + k := dshelp.MultihashToDsKey(cid.Hash()) + size, err := bs.ds.GetSize(ctx, k) + if err == nil { + return size, nil + } // nmt's GetSize expects an ipld.ErrNotFound when a cid is not found. return 0, ipld.ErrNotFound{Cid: cid} } @@ -75,27 +96,35 @@ func (bs *blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { return blockstr.GetSize(ctx, cid) } -// DeleteBlock is a noop on the EDS blockstore that returns an errUnsupportedOperation when called. -func (bs *blockstore) DeleteBlock(context.Context, cid.Cid) error { - return errUnsupportedOperation +func (bs *blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + k := dshelp.MultihashToDsKey(cid.Hash()) + return bs.ds.Delete(ctx, k) } -// Put is a noop on the EDS blockstore, but it does not return an error because it is called by -// bitswap. For clarification, an implementation of Put does not make sense in this context because -// it is unclear which CAR file the block should be written to. -// -// TODO: throw errUnsupportedOperation after issue #1440 -func (bs *blockstore) Put(context.Context, blocks.Block) error { - return nil +func (bs *blockstore) Put(ctx context.Context, blk blocks.Block) error { + k := dshelp.MultihashToDsKey(blk.Cid().Hash()) + // note: we leave duplicate resolution to the underlying datastore + return bs.ds.Put(ctx, k, blk.RawData()) } -// PutMany is a noop on the EDS blockstore, but it does not return an error because it is called by -// bitswap. For clarification, an implementation of PutMany does not make sense in this context -// because it is unclear which CAR file the blocks should be written to. -// -// TODO: throw errUnsupportedOperation after issue #1440 -func (bs *blockstore) PutMany(context.Context, []blocks.Block) error { - return nil +func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + if len(blocks) == 1 { + // performance fast-path + return bs.Put(ctx, blocks[0]) + } + + t, err := bs.ds.Batch(ctx) + if err != nil { + return err + } + for _, b := range blocks { + k := dshelp.MultihashToDsKey(b.Cid().Hash()) + err = t.Put(ctx, k, b.RawData()) + if err != nil { + return err + } + } + return t.Commit(ctx) } // AllKeysChan is a noop on the EDS blockstore because the keys are not stored in a single CAR file. @@ -112,7 +141,7 @@ func (bs *blockstore) HashOnRead(bool) { // getReadOnlyBlockstore finds the underlying blockstore of the shard that contains the given CID. 
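For readers following the new read path: the sketch below is illustrative only; fromShards and errNotInShards are hypothetical stand-ins for getReadOnlyBlockstore and the ErrNotFound/ErrNotFoundInIndex pair. It condenses how Get now falls back to the wrapped datastore that Put and PutMany write into, so blocks stored during reconstruction stay retrievable before they are indexed into a shard.

package example

import (
	"context"
	"errors"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	ipld "github.com/ipfs/go-ipld-format"
)

// errNotInShards stands in for the shard index's ErrNotFound / ErrNotFoundInIndex.
var errNotInShards = errors.New("multihash not found in shard index")

// getBlock tries the CAR-backed shards first and, when the multihash is not
// indexed yet, falls back to the datastore that Put/PutMany write to.
func getBlock(
	ctx context.Context,
	fromShards func(context.Context, cid.Cid) (blocks.Block, error),
	store ds.Datastore,
	c cid.Cid,
) (blocks.Block, error) {
	blk, err := fromShards(ctx, c)
	if err == nil {
		return blk, nil
	}
	if !errors.Is(err, errNotInShards) {
		return nil, err
	}
	data, err := store.Get(ctx, dshelp.MultihashToDsKey(c.Hash()))
	if err != nil {
		// nmt's getters expect ipld.ErrNotFound when a cid is unknown.
		return nil, ipld.ErrNotFound{Cid: c}
	}
	return blocks.NewBlockWithCid(data, c)
}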
func (bs *blockstore) getReadOnlyBlockstore(ctx context.Context, cid cid.Cid) (dagstore.ReadBlockstore, error) { keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, datastore.ErrNotFound) { + if errors.Is(err, datastore.ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { return nil, ErrNotFound } if err != nil { diff --git a/share/eds/byzantine/bad_encoding.go b/share/eds/byzantine/bad_encoding.go index 4e673ed8b8..e3a862e38a 100644 --- a/share/eds/byzantine/bad_encoding.go +++ b/share/eds/byzantine/bad_encoding.go @@ -7,7 +7,6 @@ import ( "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/go-fraud" - libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" @@ -17,12 +16,10 @@ import ( ) const ( - BadEncoding fraud.ProofType = "badencoding" -) + version = "v0.1" -func init() { - fraud.Register(&BadEncodingProof{}) -} + BadEncoding fraud.ProofType = "badencoding" + version +) type BadEncodingProof struct { headerHash []byte @@ -44,8 +41,7 @@ func CreateBadEncodingProof( hash []byte, height uint64, errByzantine *ErrByzantine, -) fraud.Proof { - +) fraud.Proof[*header.ExtendedHeader] { return &BadEncodingProof{ headerHash: hash, BlockHeight: height, @@ -110,63 +106,96 @@ func (p *BadEncodingProof) UnmarshalBinary(data []byte) error { // Validate checks that provided Merkle Proofs correspond to the shares, // rebuilds bad row or col from received shares, computes Merkle Root // and compares it with block's Merkle Root. -func (p *BadEncodingProof) Validate(hdr libhead.Header) error { - header, ok := hdr.(*header.ExtendedHeader) - if !ok { - panic(fmt.Sprintf("invalid header type: expected %T, got %T", header, hdr)) - } - if header.Height() != int64(p.BlockHeight) { - return errors.New("fraud: incorrect block height") +func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { + if hdr.Height() != p.BlockHeight { + return fmt.Errorf("incorrect block height during BEFP validation: expected %d, got %d", + p.BlockHeight, hdr.Height(), + ) } - merkleRowRoots := header.DAH.RowRoots - merkleColRoots := header.DAH.ColumnRoots - if len(merkleRowRoots) != len(merkleColRoots) { + + if len(hdr.DAH.RowRoots) != len(hdr.DAH.ColumnRoots) { // NOTE: This should never happen as callers of this method should not feed it with a // malformed extended header. panic(fmt.Sprintf( - "fraud: invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)", - len(merkleRowRoots), - len(merkleColRoots)), + "invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)", + len(hdr.DAH.RowRoots), + len(hdr.DAH.ColumnRoots)), ) } - if int(p.Index) >= len(merkleRowRoots) { - return fmt.Errorf("fraud: invalid proof: index out of bounds (%d >= %d)", int(p.Index), len(merkleRowRoots)) + + // merkleRoots are the roots against which we are going to check the inclusion of the received + // shares. 
Changing the order of the roots to prove the shares relative to the orthogonal axis, + // because inside the rsmt2d library rsmt2d.Row = 0 and rsmt2d.Col = 1 + merkleRoots := hdr.DAH.RowRoots + if p.Axis == rsmt2d.Row { + merkleRoots = hdr.DAH.ColumnRoots } - if len(merkleRowRoots) != len(p.Shares) { - return fmt.Errorf("fraud: invalid proof: incorrect number of shares %d != %d", len(p.Shares), len(merkleRowRoots)) + + if int(p.Index) >= len(merkleRoots) { + return fmt.Errorf("invalid %s proof: index out of bounds (%d >= %d)", + BadEncoding, int(p.Index), len(merkleRoots), + ) } - root := merkleRowRoots[p.Index] - if p.Axis == rsmt2d.Col { - root = merkleColRoots[p.Index] + if len(p.Shares) != len(merkleRoots) { + // Since p.Shares should contain all the shares from either a row or a + // column, it should exactly match the number of row roots. In this + // context, the number of row roots is the width of the extended data + // square. + return fmt.Errorf("invalid %s proof: incorrect number of shares %d != %d", + BadEncoding, len(p.Shares), len(merkleRoots), + ) } - // verify that Merkle proofs correspond to particular shares. - shares := make([][]byte, len(merkleRowRoots)) - for index, share := range p.Shares { + odsWidth := uint64(len(merkleRoots) / 2) + amount := uint64(0) + for _, share := range p.Shares { if share == nil { continue } + amount++ + if amount == odsWidth { + break + } + } + + if amount < odsWidth { + return errors.New("fraud: invalid proof: not enough shares provided to reconstruct row/col") + } + + // verify that Merkle proofs correspond to particular shares. + shares := make([][]byte, len(merkleRoots)) + for index, shr := range p.Shares { + if shr == nil { + continue + } // validate inclusion of the share into one of the DAHeader roots - if ok := share.Validate(ipld.MustCidFromNamespacedSha256(root)); !ok { - return fmt.Errorf("fraud: invalid proof: incorrect share received at index %d", index) + if ok := shr.Validate(ipld.MustCidFromNamespacedSha256(merkleRoots[index])); !ok { + return fmt.Errorf("invalid %s proof: incorrect share received at index %d", BadEncoding, index) } // NMTree commits the additional namespace while rsmt2d does not know about, so we trim it // this is ugliness from NMTWrapper that we have to embrace ¯\_(ツ)_/¯ - shares[index] = share.Share[ipld.NamespaceSize:] + shares[index] = share.GetData(shr.Share) } - odsWidth := uint64(len(merkleRowRoots) / 2) codec := share.DefaultRSMT2DCodec() - // rebuild a row or col. + // We can conclude that the proof is valid in case we proved the inclusion of `Shares` but + // the row/col can't be reconstructed, or the building of NMTree fails. 
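As a worked example of the share-count requirement above (a sketch, not code from this change set): an 8x8 extended square has 8 roots per axis, so the original width is 4 and a valid proof must carry at least 4 non-nil shares before reconstruction is even attempted. The helper below restates that pre-check in isolation.

package example

// hasEnoughShares mirrors the pre-check above: a BEFP must provide at least an
// original-square-width worth of shares, the minimum the erasure code needs to
// rebuild a full row or column.
func hasEnoughShares(shares [][]byte, axisRootCount int) bool {
	odsWidth := axisRootCount / 2 // the extended square is twice the original width
	count := 0
	for _, s := range shares {
		if s == nil {
			continue
		}
		count++
		if count == odsWidth {
			return true
		}
	}
	return false
}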
rebuiltShares, err := codec.Decode(shares) if err != nil { - return err + log.Infow("failed to decode shares at height", + "height", hdr.Height(), "err", err, + ) + return nil } + rebuiltExtendedShares, err := codec.Encode(rebuiltShares[0:odsWidth]) if err != nil { - return err + log.Infow("failed to encode shares at height", + "height", hdr.Height(), "err", err, + ) + return nil } copy(rebuiltShares[odsWidth:], rebuiltExtendedShares) @@ -174,19 +203,30 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error { for _, share := range rebuiltShares { err = tree.Push(share) if err != nil { - return err + log.Infow("failed to build a tree from the reconstructed shares at height", + "height", hdr.Height(), "err", err, + ) + return nil } } expectedRoot, err := tree.Root() if err != nil { - return err + log.Infow("failed to build a tree root at height", + "height", hdr.Height(), "err", err, + ) + return nil + } + + // root is a merkle root of the row/col where ErrByzantine occurred + root := hdr.DAH.RowRoots[p.Index] + if p.Axis == rsmt2d.Col { + root = hdr.DAH.ColumnRoots[p.Index] } // comparing rebuilt Merkle Root of bad row/col with respective Merkle Root of row/col from block. if bytes.Equal(expectedRoot, root) { - return errors.New("fraud: invalid proof: recomputed Merkle root matches the DAH's row/column root") + return fmt.Errorf("invalid %s proof: recomputed Merkle root matches the DAH's row/column root", BadEncoding) } - return nil } diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go index 6e413b17a2..b5dcea3452 100644 --- a/share/eds/byzantine/bad_encoding_test.go +++ b/share/eds/byzantine/bad_encoding_test.go @@ -3,8 +3,10 @@ package byzantine import ( "context" "testing" + "time" mdutils "github.com/ipfs/go-merkledag/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" core "github.com/tendermint/tendermint/types" @@ -12,10 +14,35 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) +func TestBadEncodingFraudProof(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer t.Cleanup(cancel) + bServ := mdutils.Bserv() + + square := edstest.RandByzantineEDS(t, 16) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(t, err) + err = ipld.ImportEDS(ctx, square, bServ) + require.NoError(t, err) + + var errRsmt2d *rsmt2d.ErrByzantineData + err = square.Repair(dah.RowRoots, dah.ColumnRoots) + require.ErrorAs(t, err, &errRsmt2d) + + errByz := NewErrByzantine(ctx, bServ, &dah, errRsmt2d) + + befp := CreateBadEncodingProof([]byte("hash"), 0, errByz) + err = befp.Validate(&header.ExtendedHeader{ + DAH: &dah, + }) + assert.NoError(t, err) +} + // TestIncorrectBadEncodingFraudProof asserts that BEFP is not generated for the correct data func TestIncorrectBadEncodingFraudProof(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) @@ -24,12 +51,13 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { bServ := mdutils.Bserv() squareSize := 8 - shares := share.RandShares(t, squareSize*squareSize) + shares := sharetest.RandShares(t, squareSize*squareSize) - eds, err := share.AddShares(ctx, shares, bServ) + eds, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah := 
da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) // get an arbitrary row row := uint(squareSize / 2) @@ -58,7 +86,7 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { }, } - proof := CreateBadEncodingProof(h.Hash(), uint64(h.Height()), &fakeError) + proof := CreateBadEncodingProof(h.Hash(), h.Height(), &fakeError) err = proof.Validate(h) require.Error(t, err) } diff --git a/share/eds/byzantine/byzantine.go b/share/eds/byzantine/byzantine.go index b9c8ef414f..0fcd78273e 100644 --- a/share/eds/byzantine/byzantine.go +++ b/share/eds/byzantine/byzantine.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/ipfs/go-blockservice" + "golang.org/x/sync/errgroup" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/rsmt2d" @@ -35,17 +36,41 @@ func NewErrByzantine( dah *da.DataAvailabilityHeader, errByz *rsmt2d.ErrByzantineData, ) *ErrByzantine { - root := [][][]byte{ - dah.RowRoots, + // changing the order to collect proofs against an orthogonal axis + roots := [][][]byte{ dah.ColumnRoots, - }[errByz.Axis][errByz.Index] - sharesWithProof, err := GetProofsForShares( - ctx, - bGetter, - ipld.MustCidFromNamespacedSha256(root), - errByz.Shares, - ) - if err != nil { + dah.RowRoots, + }[errByz.Axis] + + sharesWithProof := make([]*ShareWithProof, len(errByz.Shares)) + sharesAmount := 0 + + errGr, ctx := errgroup.WithContext(ctx) + for index, share := range errByz.Shares { + // skip further shares if we already requested half of them, which is enough to recompute the row + // or col + if sharesAmount == len(dah.RowRoots)/2 { + break + } + + if share == nil { + continue + } + sharesAmount++ + + index := index + errGr.Go(func() error { + share, err := getProofsAt( + ctx, bGetter, + ipld.MustCidFromNamespacedSha256(roots[index]), + int(errByz.Index), len(errByz.Shares), + ) + sharesWithProof[index] = share + return err + }) + } + + if err := errGr.Wait(); err != nil { // Fatal as rsmt2d proved that error is byzantine, // but we cannot properly collect the proof, // so verification will fail and thus services won't be stopped @@ -53,7 +78,6 @@ func NewErrByzantine( // TODO(@Wondertan): Find a better way to handle log.Fatalw("getting proof for ErrByzantine", "err", err) } - return &ErrByzantine{ Index: uint32(errByz.Index), Shares: sharesWithProof, diff --git a/share/eds/byzantine/pb/share.pb.go b/share/eds/byzantine/pb/share.pb.go index 33b9cdd1ab..4186eabc64 100644 --- a/share/eds/byzantine/pb/share.pb.go +++ b/share/eds/byzantine/pb/share.pb.go @@ -5,6 +5,7 @@ package share_eds_byzantine_pb import ( fmt "fmt" + pb "github.com/celestiaorg/nmt/pb" proto "github.com/gogo/protobuf/proto" io "io" math "math" @@ -47,84 +48,16 @@ func (Axis) EnumDescriptor() ([]byte, []int) { return fileDescriptor_d28ce8f160a920d1, []int{0} } -type MerkleProof struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` - Nodes [][]byte `protobuf:"bytes,3,rep,name=nodes,proto3" json:"nodes,omitempty"` - LeafHash []byte `protobuf:"bytes,4,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` -} - -func (m *MerkleProof) Reset() { *m = MerkleProof{} } -func (m *MerkleProof) String() string { return proto.CompactTextString(m) } -func (*MerkleProof) ProtoMessage() {} -func (*MerkleProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d28ce8f160a920d1, []int{0} -} -func (m *MerkleProof) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *MerkleProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MerkleProof.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MerkleProof) XXX_Merge(src proto.Message) { - xxx_messageInfo_MerkleProof.Merge(m, src) -} -func (m *MerkleProof) XXX_Size() int { - return m.Size() -} -func (m *MerkleProof) XXX_DiscardUnknown() { - xxx_messageInfo_MerkleProof.DiscardUnknown(m) -} - -var xxx_messageInfo_MerkleProof proto.InternalMessageInfo - -func (m *MerkleProof) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *MerkleProof) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *MerkleProof) GetNodes() [][]byte { - if m != nil { - return m.Nodes - } - return nil -} - -func (m *MerkleProof) GetLeafHash() []byte { - if m != nil { - return m.LeafHash - } - return nil -} - type Share struct { - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` - Proof *MerkleProof `protobuf:"bytes,2,opt,name=Proof,proto3" json:"Proof,omitempty"` + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=Proof,proto3" json:"Proof,omitempty"` } func (m *Share) Reset() { *m = Share{} } func (m *Share) String() string { return proto.CompactTextString(m) } func (*Share) ProtoMessage() {} func (*Share) Descriptor() ([]byte, []int) { - return fileDescriptor_d28ce8f160a920d1, []int{1} + return fileDescriptor_d28ce8f160a920d1, []int{0} } func (m *Share) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +93,7 @@ func (m *Share) GetData() []byte { return nil } -func (m *Share) GetProof() *MerkleProof { +func (m *Share) GetProof() *pb.Proof { if m != nil { return m.Proof } @@ -179,7 +112,7 @@ func (m *BadEncoding) Reset() { *m = BadEncoding{} } func (m *BadEncoding) String() string { return proto.CompactTextString(m) } func (*BadEncoding) ProtoMessage() {} func (*BadEncoding) Descriptor() ([]byte, []int) { - return fileDescriptor_d28ce8f160a920d1, []int{2} + return fileDescriptor_d28ce8f160a920d1, []int{1} } func (m *BadEncoding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +178,6 @@ func (m *BadEncoding) GetAxis() Axis { func init() { proto.RegisterEnum("share.eds.byzantine.pb.Axis", Axis_name, Axis_value) - proto.RegisterType((*MerkleProof)(nil), "share.eds.byzantine.pb.MerkleProof") proto.RegisterType((*Share)(nil), "share.eds.byzantine.pb.Share") proto.RegisterType((*BadEncoding)(nil), "share.eds.byzantine.pb.BadEncoding") } @@ -255,78 +187,26 @@ func init() { } var fileDescriptor_d28ce8f160a920d1 = []byte{ - // 347 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x41, 0x4b, 0xf3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb7, 0xed, 0xde, 0xf7, 0xfd, 0x77, 0xca, 0x08, 0x32, 0x02, 0x6a, 0x28, 0xf5, - 0x52, 0x3c, 0xb4, 0x32, 0xf1, 0xe0, 0xd1, 0xa9, 0x30, 0x41, 0x99, 0x44, 0xd0, 0xa3, 0xa4, 0x26, - 0x5b, 0x8b, 0x23, 0x1d, 0x4d, 0x0f, 0xd3, 0x4f, 0xe1, 0x87, 0xf2, 0xe0, 0x71, 0x47, 0x8f, 0xb2, - 0x7d, 0x11, 0x49, 0x3a, 0x64, 0x07, 0x77, 0xfb, 0x3f, 0x0f, 0x4f, 0xf2, 0xfc, 0xfe, 0x09, 0x44, - 0x3a, 0xe7, 0x95, 0x4c, 0xa5, 0xd0, 0x69, 0xf6, 0xf2, 0xca, 0x55, 0x5d, 0x28, 0x99, 0x4e, 0xb3, - 0xd4, 0xda, 0xc9, 0xb4, 0x2a, 0xeb, 0x12, 0x77, 0x1b, 0x21, 0x85, 0x4e, 0x7e, 0x32, 0xc9, 0x34, - 0x8b, 0x72, 0x08, 
0x6e, 0x64, 0xf5, 0x3c, 0x91, 0xb7, 0x55, 0x59, 0x8e, 0xf0, 0x0e, 0xf8, 0xba, - 0xe6, 0x55, 0x4d, 0x50, 0x88, 0x62, 0x97, 0x35, 0x02, 0x77, 0xc0, 0x95, 0x4a, 0x90, 0x3f, 0xd6, - 0x33, 0xa3, 0xc9, 0xa9, 0x52, 0x48, 0x4d, 0xdc, 0xd0, 0x8d, 0xdb, 0xac, 0x11, 0x78, 0x17, 0xfe, - 0x4f, 0x24, 0x1f, 0x3d, 0xe6, 0x5c, 0xe7, 0xc4, 0x0b, 0x51, 0xdc, 0x66, 0xff, 0x8c, 0x31, 0xe0, - 0x3a, 0x8f, 0xee, 0xc1, 0xbf, 0x33, 0x0c, 0x18, 0x83, 0x77, 0xc1, 0x6b, 0x6e, 0x2b, 0xda, 0xcc, - 0xce, 0xf8, 0x14, 0x7c, 0x0b, 0x60, 0x3b, 0x82, 0xde, 0x41, 0xf2, 0x3b, 0x6e, 0xb2, 0xc6, 0xca, - 0x9a, 0x13, 0xd1, 0x3b, 0x82, 0xa0, 0xcf, 0xc5, 0xa5, 0x7a, 0x2a, 0x45, 0xa1, 0xc6, 0x98, 0x02, - 0x0c, 0x24, 0x17, 0xb2, 0x32, 0xad, 0xab, 0x92, 0x35, 0x07, 0x77, 0xa1, 0x35, 0x90, 0xc5, 0x38, - 0xaf, 0x6d, 0x97, 0xc7, 0x56, 0x0a, 0x9f, 0x40, 0xcb, 0xf2, 0x35, 0x3b, 0x05, 0xbd, 0xfd, 0x4d, - 0x0c, 0x36, 0xc5, 0x56, 0x61, 0xf3, 0x12, 0x57, 0x4a, 0xc8, 0x99, 0xdd, 0x77, 0x8b, 0x35, 0x02, - 0x1f, 0x81, 0x77, 0x36, 0x2b, 0x34, 0xf1, 0x43, 0x14, 0x6f, 0xf7, 0xf6, 0x36, 0x5d, 0xc5, 0x67, - 0x85, 0x66, 0x36, 0x79, 0x48, 0xc0, 0x33, 0x0a, 0xff, 0x05, 0x97, 0x0d, 0x1f, 0x3a, 0x8e, 0x19, - 0xce, 0x87, 0xd7, 0x1d, 0xd4, 0x27, 0x1f, 0x0b, 0x8a, 0xe6, 0x0b, 0x8a, 0xbe, 0x16, 0x14, 0xbd, - 0x2d, 0xa9, 0x33, 0x5f, 0x52, 0xe7, 0x73, 0x49, 0x9d, 0xac, 0x65, 0xff, 0xf6, 0xf8, 0x3b, 0x00, - 0x00, 0xff, 0xff, 0x0f, 0xa3, 0x91, 0xb4, 0x01, 0x02, 0x00, 0x00, -} - -func (m *MerkleProof) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MerkleProof) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MerkleProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LeafHash) > 0 { - i -= len(m.LeafHash) - copy(dAtA[i:], m.LeafHash) - i = encodeVarintShare(dAtA, i, uint64(len(m.LeafHash))) - i-- - dAtA[i] = 0x22 - } - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Nodes[iNdEx]) - copy(dAtA[i:], m.Nodes[iNdEx]) - i = encodeVarintShare(dAtA, i, uint64(len(m.Nodes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.End != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x10 - } - if m.Start != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xc1, 0x4a, 0xec, 0x30, + 0x14, 0x86, 0x9b, 0x3b, 0xed, 0x5c, 0x38, 0xa3, 0xe3, 0x10, 0x64, 0x08, 0xa2, 0xa1, 0x0c, 0x08, + 0xc5, 0x45, 0x2a, 0x15, 0x1f, 0xc0, 0xaa, 0x50, 0x41, 0x18, 0x89, 0x0b, 0xd7, 0x89, 0x89, 0xd3, + 0x6e, 0xda, 0xd2, 0x74, 0x51, 0x7d, 0x0a, 0x1f, 0xca, 0x85, 0xcb, 0x59, 0xba, 0x94, 0xf6, 0x45, + 0xa4, 0x69, 0x11, 0x17, 0xba, 0x3b, 0xdf, 0x9f, 0x2f, 0x3f, 0xc9, 0x81, 0x95, 0x49, 0x45, 0xa5, + 0x43, 0xad, 0x4c, 0x28, 0x9f, 0x5f, 0x44, 0x5e, 0x67, 0xb9, 0x0e, 0x4b, 0x19, 0xda, 0x98, 0x95, + 0x55, 0x51, 0x17, 0x78, 0x39, 0x80, 0x56, 0x86, 0x7d, 0x3b, 0xac, 0x94, 0x07, 0xf3, 0x52, 0x86, + 0x65, 0x55, 0x14, 0x4f, 0x83, 0xb7, 0x8a, 0xc1, 0xbb, 0xef, 0x4d, 0x8c, 0xc1, 0xbd, 0x12, 0xb5, + 0x20, 0xc8, 0x47, 0xc1, 0x0e, 0xb7, 0x33, 0x3e, 0x06, 0xef, 0xae, 0x77, 0xc9, 0x3f, 0x1f, 0x05, + 0xb3, 0x68, 0x8f, 0x8d, 0x37, 0x25, 0xb3, 0x31, 0x1f, 0x4e, 0x57, 0x6f, 0x08, 0x66, 0xb1, 0x50, + 0xd7, 0xf9, 
0x63, 0xa1, 0xb2, 0x7c, 0x83, 0x29, 0x40, 0xa2, 0x85, 0xd2, 0x55, 0x22, 0x4c, 0x3a, + 0x16, 0xfe, 0x48, 0xf0, 0x12, 0xa6, 0x89, 0xce, 0x36, 0x69, 0x6d, 0x7b, 0x5d, 0x3e, 0x12, 0x3e, + 0x87, 0xa9, 0x7d, 0x8b, 0x21, 0x13, 0x7f, 0x12, 0xcc, 0xa2, 0x23, 0xf6, 0xfb, 0x27, 0x98, 0xb5, + 0xf8, 0x28, 0xe3, 0x7d, 0xf0, 0x6e, 0x72, 0xa5, 0x1b, 0xe2, 0xfa, 0x28, 0xd8, 0xe5, 0x03, 0xe0, + 0x53, 0x70, 0x2f, 0x9a, 0xcc, 0x10, 0xcf, 0x47, 0xc1, 0x3c, 0x3a, 0xfc, 0xab, 0x4a, 0x34, 0x99, + 0xe1, 0xd6, 0x3c, 0x21, 0xe0, 0xf6, 0x84, 0xff, 0xc3, 0x84, 0xaf, 0x1f, 0x16, 0x4e, 0x3f, 0x5c, + 0xae, 0x6f, 0x17, 0x28, 0x26, 0xef, 0x2d, 0x45, 0xdb, 0x96, 0xa2, 0xcf, 0x96, 0xa2, 0xd7, 0x8e, + 0x3a, 0xdb, 0x8e, 0x3a, 0x1f, 0x1d, 0x75, 0xe4, 0xd4, 0x6e, 0xf1, 0xec, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xb1, 0x96, 0xb9, 0xbe, 0x93, 0x01, 0x00, 0x00, } func (m *Share) Marshal() (dAtA []byte, err error) { @@ -441,31 +321,6 @@ func encodeVarintShare(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *MerkleProof) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovShare(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovShare(uint64(m.End)) - } - if len(m.Nodes) > 0 { - for _, b := range m.Nodes { - l = len(b) - n += 1 + l + sovShare(uint64(l)) - } - } - l = len(m.LeafHash) - if l > 0 { - n += 1 + l + sovShare(uint64(l)) - } - return n -} - func (m *Share) Size() (n int) { if m == nil { return 0 @@ -517,160 +372,6 @@ func sovShare(x uint64) (n int) { func sozShare(x uint64) (n int) { return sovShare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *MerkleProof) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MerkleProof: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MerkleProof: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) - copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) - if m.LeafHash == nil { - m.LeafHash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Share) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -764,7 +465,7 @@ func (m *Share) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Proof == nil { - m.Proof = &MerkleProof{} + m.Proof = &pb.Proof{} } if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/share/eds/byzantine/pb/share.proto b/share/eds/byzantine/pb/share.proto index e08dffae45..33e3dae2c2 100644 --- a/share/eds/byzantine/pb/share.proto +++ b/share/eds/byzantine/pb/share.proto @@ -1,17 +1,11 @@ syntax = "proto3"; package share.eds.byzantine.pb; - -message MerkleProof { - int64 start = 1; - int64 end = 2; - repeated bytes nodes = 3; - bytes leaf_hash = 4; -} +import "pb/proof.proto"; message Share { bytes Data = 1; - MerkleProof Proof = 2; + proof.pb.Proof Proof = 2; } enum axis { diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go index 86eaad54a8..411fd85818 100644 --- a/share/eds/byzantine/share_proof.go +++ b/share/eds/byzantine/share_proof.go @@ -2,13 +2,14 @@ package byzantine import ( "context" + "crypto/sha256" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" - "github.com/minio/sha256-simd" "github.com/celestiaorg/nmt" + nmt_pb "github.com/celestiaorg/nmt/pb" "github.com/celestiaorg/celestia-node/share" pb "github.com/celestiaorg/celestia-node/share/eds/byzantine/pb" @@ -45,8 +46,8 @@ func NewShareWithProof(index int, share share.Share, pathToLeaf []cid.Cid) *Shar func (s *ShareWithProof) Validate(root cid.Cid) bool { return s.Proof.VerifyInclusion( sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally - share.ID(s.Share), - [][]byte{share.Data(s.Share)}, + share.GetNamespace(s.Share).ToNMT(), + [][]byte{share.GetData(s.Share)}, ipld.NamespacedSha256FromCID(root), ) } @@ -58,11 +59,12 @@ func (s *ShareWithProof) ShareWithProofToProto() *pb.Share { return &pb.Share{ Data: s.Share, - Proof: &pb.MerkleProof{ - Start: int64(s.Proof.Start()), - End: int64(s.Proof.End()), - Nodes: s.Proof.Nodes(), - LeafHash: s.Proof.LeafHash(), + Proof: &nmt_pb.Proof{ + Start: int64(s.Proof.Start()), + End: int64(s.Proof.End()), + Nodes: s.Proof.Nodes(), + LeafHash: s.Proof.LeafHash(), + IsMaxNamespaceIgnored: s.Proof.IsMaxNamespaceIDIgnored(), }, } } @@ -78,24 +80,38 @@ func 
GetProofsForShares( proofs := make([]*ShareWithProof, len(shares)) for index, share := range shares { if share != nil { - proof := make([]cid.Cid, 0) - // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same - // tree. Add options that will control what data will be fetched. - s, err := ipld.GetLeaf(ctx, bGetter, root, index, len(shares)) + proof, err := getProofsAt(ctx, bGetter, root, index, len(shares)) if err != nil { return nil, err } - proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, len(shares)) - if err != nil { - return nil, err - } - proofs[index] = NewShareWithProof(index, s.RawData(), proof) + proofs[index] = proof } } - return proofs, nil } +func getProofsAt( + ctx context.Context, + bGetter blockservice.BlockGetter, + root cid.Cid, + index, + total int, +) (*ShareWithProof, error) { + proof := make([]cid.Cid, 0) + // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same + // tree. Add options that will control what data will be fetched. + node, err := ipld.GetLeaf(ctx, bGetter, root, index, total) + if err != nil { + return nil, err + } + + proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, total) + if err != nil { + return nil, err + } + return NewShareWithProof(index, node.RawData(), proof), nil +} + func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { shares := make([]*ShareWithProof, len(protoShares)) for i, share := range protoShares { @@ -108,6 +124,11 @@ func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { return shares } -func ProtoToProof(protoProof *pb.MerkleProof) nmt.Proof { - return nmt.NewInclusionProof(int(protoProof.Start), int(protoProof.End), protoProof.Nodes, ipld.NMTIgnoreMaxNamespace) +func ProtoToProof(protoProof *nmt_pb.Proof) nmt.Proof { + return nmt.NewInclusionProof( + int(protoProof.Start), + int(protoProof.End), + protoProof.Nodes, + protoProof.IsMaxNamespaceIgnored, + ) } diff --git a/share/eds/byzantine/share_proof_test.go b/share/eds/byzantine/share_proof_test.go index 9cffe6eb18..db1db64f80 100644 --- a/share/eds/byzantine/share_proof_test.go +++ b/share/eds/byzantine/share_proof_test.go @@ -12,8 +12,8 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestGetProof(t *testing.T) { @@ -23,11 +23,12 @@ func TestGetProof(t *testing.T) { defer cancel() bServ := mdutils.Bserv() - shares := share.RandShares(t, width*width) - in, err := share.AddShares(ctx, shares, bServ) + shares := sharetest.RandShares(t, width*width) + in, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(in) + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) var tests = []struct { roots [][]byte }{ @@ -59,11 +60,12 @@ func TestGetProofs(t *testing.T) { defer cancel() bServ := mdutils.Bserv() - shares := share.RandShares(t, width*width) - in, err := share.AddShares(ctx, shares, bServ) + shares := sharetest.RandShares(t, width*width) + in, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(in) + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) for _, root := range dah.ColumnRoots { rootCid := ipld.MustCidFromNamespacedSha256(root) data := make([][]byte, 0, in.Width()) diff --git a/share/eds/eds.go b/share/eds/eds.go index 4e96fd684e..e689aec31c 
100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -3,24 +3,17 @@ package eds import ( "bytes" "context" + "crypto/sha256" "errors" "fmt" "io" "math" - "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - format "github.com/ipfs/go-ipld-format" "github.com/ipld/go-car" "github.com/ipld/go-car/util" - "github.com/minio/sha256-simd" - "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -32,15 +25,6 @@ import ( var ErrEmptySquare = errors.New("share: importing empty data") -// writingSession contains the components needed to write an EDS to a CARv1 file with our custom -// node order. -type writingSession struct { - eds *rsmt2d.ExtendedDataSquare - store bstore.Blockstore // caches inner nodes (proofs) while we walk the nmt tree. - hasher *nmt.Hasher - w io.Writer -} - // WriteEDS writes the entire EDS into the given io.Writer as CARv1 file. // This includes all shares in quadrant order, followed by all inner nodes of the NMT tree. // Order: [ Carv1Header | Q1 | Q2 | Q3 | Q4 | inner nodes ] @@ -51,77 +35,28 @@ func WriteEDS(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) utils.SetStatusAndEnd(span, err) }() - // 1. Reimport EDS. This is needed to traverse the NMT tree and cache the inner nodes (proofs) - writer, err := initializeWriter(ctx, eds, w) - if err != nil { - return fmt.Errorf("share: creating eds writer: %w", err) - } - - // 2. Creates and writes Carv1Header - // - Roots are the eds Row + Col roots - err = writer.writeHeader() + // Creates and writes Carv1Header. Roots are the eds Row + Col roots + err = writeHeader(eds, w) if err != nil { return fmt.Errorf("share: writing carv1 header: %w", err) } - - // 3. Iterates over shares in quadrant order via eds.GetCell - err = writer.writeQuadrants() + // Iterates over shares in quadrant order via eds.GetCell + err = writeQuadrants(eds, w) if err != nil { return fmt.Errorf("share: writing shares: %w", err) } - // 4. Iterates over in-memory blockstore and writes proofs to the CAR - err = writer.writeProofs(ctx) + // Iterates over proofs and writes them to the CAR + err = writeProofs(ctx, eds, w) if err != nil { return fmt.Errorf("share: writing proofs: %w", err) } return nil } -// initializeWriter reimports the EDS into an in-memory blockstore in order to cache the proofs. 
-func initializeWriter(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) (*writingSession, error) { - // we use an in-memory blockstore and an offline exchange - store := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - bs := blockservice.New(store, nil) - // shares are extracted from the eds so that we can reimport them to traverse - shares := share.ExtractEDS(eds) - shareCount := len(shares) - if shareCount == 0 { - return nil, ErrEmptySquare - } - odsWidth := int(math.Sqrt(float64(shareCount)) / 2) - // (shareCount*2) - (odsWidth*4) is the amount of inner nodes visited - batchAdder := ipld.NewNmtNodeAdder(ctx, bs, format.MaxSizeBatchOption(innerNodeBatchSize(shareCount, odsWidth))) - // this adder ignores leaves, so that they are not added to the store we iterate through in - // writeProofs - eds, err := rsmt2d.ImportExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(odsWidth), - nmt.NodeVisitor(batchAdder.VisitInnerNodes)), - ) - if err != nil { - return nil, fmt.Errorf("recomputing data square: %w", err) - } - // compute roots - eds.RowRoots() - // commit the batch to DAG - err = batchAdder.Commit() - if err != nil { - return nil, fmt.Errorf("committing inner nodes to the dag: %w", err) - } - - return &writingSession{ - eds: eds, - store: store, - hasher: nmt.NewNmtHasher(sha256.New(), ipld.NamespaceSize, ipld.NMTIgnoreMaxNamespace), - w: w, - }, nil -} - // writeHeader creates a CarV1 header using the EDS's Row and Column roots as the list of DAG roots. -func (w *writingSession) writeHeader() error { - rootCids, err := rootsToCids(w.eds) +func writeHeader(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + rootCids, err := rootsToCids(eds) if err != nil { return fmt.Errorf("getting root cids: %w", err) } @@ -129,14 +64,15 @@ func (w *writingSession) writeHeader() error { return car.WriteHeader(&car.CarHeader{ Roots: rootCids, Version: 1, - }, w.w) + }, w) } // writeQuadrants reorders the shares to quadrant order and writes them to the CARv1 file. -func (w *writingSession) writeQuadrants() error { - shares := quadrantOrder(w.eds) +func writeQuadrants(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + hasher := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, ipld.NMTIgnoreMaxNamespace) + shares := quadrantOrder(eds) for _, share := range shares { - leaf, err := w.hasher.HashLeaf(share) + leaf, err := hasher.HashLeaf(share) if err != nil { return fmt.Errorf("hashing share: %w", err) } @@ -144,7 +80,7 @@ func (w *writingSession) writeQuadrants() error { if err != nil { return fmt.Errorf("getting cid from share: %w", err) } - err = util.LdWrite(w.w, cid.Bytes(), share) + err = util.LdWrite(w, cid.Bytes(), share) if err != nil { return fmt.Errorf("writing share to file: %w", err) } @@ -154,30 +90,15 @@ func (w *writingSession) writeQuadrants() error { // writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the // CARv1 file. 
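To make the resulting file layout concrete, here is a minimal sketch assuming the same go-car v0 car and util packages used in this file; carBlock is a hypothetical helper type, not something this change adds. The order matches WriteEDS: a CARv1 header whose roots are the row and column roots, then length-delimited leaves in quadrant order, then the inner-node proofs.

package example

import (
	"io"

	"github.com/ipfs/go-cid"
	"github.com/ipld/go-car"
	"github.com/ipld/go-car/util"
)

// carBlock pairs a CID with the raw data to be written for it.
type carBlock struct {
	cid  cid.Cid
	data []byte
}

// writeCARv1 writes the header, then leaves (quadrant order), then proofs.
func writeCARv1(w io.Writer, roots []cid.Cid, leaves, proofs []carBlock) error {
	if err := car.WriteHeader(&car.CarHeader{Roots: roots, Version: 1}, w); err != nil {
		return err
	}
	for _, b := range leaves {
		if err := util.LdWrite(w, b.cid.Bytes(), b.data); err != nil {
			return err
		}
	}
	for _, b := range proofs {
		if err := util.LdWrite(w, b.cid.Bytes(), b.data); err != nil {
			return err
		}
	}
	return nil
}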
-func (w *writingSession) writeProofs(ctx context.Context) error { - // we only stored proofs to the store, so we can just iterate over them here without getting any - // leaves - proofs, err := w.store.AllKeysChan(ctx) +func writeProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + // check if proofs are collected by ipld.ProofsAdder in previous reconstructions of eds + proofs, err := getProofs(ctx, eds) if err != nil { - return fmt.Errorf("getting all keys from the blockstore: %w", err) + return fmt.Errorf("recomputing proofs: %w", err) } - for proofCid := range proofs { - block, err := w.store.Get(ctx, proofCid) - if err != nil { - return fmt.Errorf("getting proof from the blockstore: %w", err) - } - node := block.RawData() - left, right := node[:ipld.NmtHashSize], node[ipld.NmtHashSize:] - hash, err := w.hasher.HashNode(left, right) - if err != nil { - return fmt.Errorf("hashing node: %w", err) - } - cid, err := ipld.CidFromNamespacedSha256(hash) - if err != nil { - return fmt.Errorf("getting cid: %w", err) - } - err = util.LdWrite(w.w, cid.Bytes(), node) + for id, proof := range proofs { + err := util.LdWrite(w, id.Bytes(), proof) if err != nil { return fmt.Errorf("writing proof to the car: %w", err) } @@ -185,6 +106,43 @@ func (w *writingSession) writeProofs(ctx context.Context) error { return nil } +func getProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare) (map[cid.Cid][]byte, error) { + // check if there are proofs collected by ipld.ProofsAdder in previous reconstruction of eds + if adder := ipld.ProofsAdderFromCtx(ctx); adder != nil { + defer adder.Purge() + return adder.Proofs(), nil + } + + // recompute proofs from eds + shares := eds.Flattened() + shareCount := len(shares) + if shareCount == 0 { + return nil, ErrEmptySquare + } + odsWidth := int(math.Sqrt(float64(shareCount)) / 2) + + // this adder ignores leaves, so that they are not added to the store we iterate through in + // writeProofs + adder := ipld.NewProofsAdder(odsWidth * 2) + defer adder.Purge() + + eds, err := rsmt2d.ImportExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsWidth), + nmt.NodeVisitor(adder.VisitFn())), + ) + if err != nil { + return nil, fmt.Errorf("recomputing data square: %w", err) + } + // compute roots + if _, err = eds.RowRoots(); err != nil { + return nil, fmt.Errorf("computing row roots: %w", err) + } + + return adder.Proofs(), nil +} + // quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the // respective namespace to the shares. // e.g. [ Q1 R1 | Q1 R2 | Q1 R3 | Q1 R4 | Q2 R1 | Q2 R2 .... ] @@ -219,13 +177,13 @@ func getQuadrantCells(eds *rsmt2d.ExtendedDataSquare, i, j uint) [][]byte { // prependNamespace adds the namespace to the passed share if in the first quadrant, // otherwise it adds the ParitySharesNamespace to the beginning. -func prependNamespace(quadrant int, share []byte) []byte { - namespacedShare := make([]byte, 0, appconsts.NamespaceSize+appconsts.ShareSize) +func prependNamespace(quadrant int, shr share.Share) []byte { + namespacedShare := make([]byte, 0, share.NamespaceSize+share.Size) switch quadrant { case 0: - return append(append(namespacedShare, share[:ipld.NamespaceSize]...), share...) + return append(append(namespacedShare, share.GetNamespace(shr)...), shr...) case 1, 2, 3: - return append(append(namespacedShare, namespace.ParitySharesNamespace.Bytes()...), share...) + return append(append(namespacedShare, share.ParitySharesNamespace...), shr...) 
default: panic("invalid quadrant") } @@ -233,8 +191,18 @@ func prependNamespace(quadrant int, share []byte) []byte { // rootsToCids converts the EDS's Row and Column roots to CIDs. func rootsToCids(eds *rsmt2d.ExtendedDataSquare) ([]cid.Cid, error) { - var err error - roots := append(eds.RowRoots(), eds.ColRoots()...) + rowRoots, err := eds.RowRoots() + if err != nil { + return nil, err + } + colRoots, err := eds.ColRoots() + if err != nil { + return nil, err + } + + roots := make([][]byte, 0, len(rowRoots)+len(colRoots)) + roots = append(roots, rowRoots...) + roots = append(roots, colRoots...) rootCids := make([]cid.Cid, len(roots)) for i, r := range roots { rootCids[i], err = ipld.CidFromNamespacedSha256(r) @@ -273,19 +241,29 @@ func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d } // the stored first quadrant shares are wrapped with the namespace twice. // we cut it off here, because it is added again while importing to the tree below - shares[i] = block.RawData()[ipld.NamespaceSize:] + shares[i] = share.GetData(block.RawData()) + } + + // use proofs adder if provided, to cache collected proofs while recomputing the eds + var opts []nmt.Option + visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() + if visitor != nil { + opts = append(opts, nmt.NodeVisitor(visitor)) } eds, err = rsmt2d.ComputeExtendedDataSquare( shares, share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(odsWidth)), + wrapper.NewConstructor(uint64(odsWidth), opts...), ) if err != nil { return nil, fmt.Errorf("share: computing eds: %w", err) } - newDah := da.NewDataAvailabilityHeader(eds) + newDah, err := da.NewDataAvailabilityHeader(eds) + if err != nil { + return nil, err + } if !bytes.Equal(newDah.Hash(), root) { return nil, fmt.Errorf( "share: content integrity mismatch: imported root %s doesn't match expected root %s", @@ -295,9 +273,3 @@ func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d } return eds, nil } - -// innerNodeBatchSize calculates the total number of inner nodes in an EDS, -// to be flushed to the dagstore in a single write. 
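The integrity check performed at the end of ReadEDS can be summarized by the sketch below, which assumes the error-returning da.NewDataAvailabilityHeader adopted throughout this change set: rebuild the DAH from the imported square and compare its hash against the root the caller expected.

package example

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/da"
	"github.com/celestiaorg/rsmt2d"

	"github.com/celestiaorg/celestia-node/share"
)

// verifyRoot recomputes the DataAvailabilityHeader of an imported EDS and
// checks that its hash matches the expected DataHash.
func verifyRoot(eds *rsmt2d.ExtendedDataSquare, expected share.DataHash) error {
	dah, err := da.NewDataAvailabilityHeader(eds)
	if err != nil {
		return err
	}
	if !bytes.Equal(dah.Hash(), expected) {
		return fmt.Errorf("content integrity mismatch: imported root %s, expected %s",
			share.DataHash(dah.Hash()), expected)
	}
	return nil
}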
-func innerNodeBatchSize(shareCount int, odsWidth int) int { - return (shareCount * 2) - (odsWidth * 4) -} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go index ea0f06c138..6ebca8b779 100644 --- a/share/eds/eds_test.go +++ b/share/eds/eds_test.go @@ -9,8 +9,8 @@ import ( "os" "testing" + bstore "github.com/ipfs/boxo/blockstore" ds "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" carv1 "github.com/ipld/go-car" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,11 +18,10 @@ import ( "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/eds/edstest" ) //go:embed "testdata/example-root.json" @@ -57,7 +56,7 @@ func TestQuadrantOrder(t *testing.T) { res := quadrantOrder(eds) for _, s := range res { - require.Len(t, s, testShareSize+namespace.NamespaceSize) + require.Len(t, s, testShareSize+share.NamespaceSize) } for q := 0; q < 4; q++ { @@ -101,7 +100,7 @@ func TestWriteEDSStartsWithLeaves(t *testing.T) { block, err := reader.Next() require.NoError(t, err, "error getting first block") - require.Equal(t, block.RawData()[ipld.NamespaceSize:], eds.GetCell(0, 0)) + require.Equal(t, share.GetData(block.RawData()), eds.GetCell(0, 0)) } func TestWriteEDSIncludesRoots(t *testing.T) { @@ -137,44 +136,27 @@ func TestWriteEDSInQuadrantOrder(t *testing.T) { } } -// TestInnerNodeBatchSize verifies that the number of unique inner nodes is equal to ipld.BatchSize -// - shareCount. -func TestInnerNodeBatchSize(t *testing.T) { - tests := []struct { - name string - origWidth int - }{ - {"2", 2}, - {"4", 4}, - {"8", 8}, - {"16", 16}, - {"32", 32}, - // {"64", 64}, // test case too large for CI with race detector - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - extendedWidth := tt.origWidth * 2 - shareCount := extendedWidth * extendedWidth - assert.Equalf( - t, - innerNodeBatchSize(shareCount, tt.origWidth), - ipld.BatchSize(extendedWidth)-shareCount, - "batchSize(%v)", extendedWidth, - ) - }) - } -} - func TestReadWriteRoundtrip(t *testing.T) { eds := writeRandomEDS(t) - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) f := openWrittenEDS(t) defer f.Close() loaded, err := ReadEDS(context.Background(), f, dah.Hash()) require.NoError(t, err, "error reading EDS from file") - require.Equal(t, eds.RowRoots(), loaded.RowRoots()) - require.Equal(t, eds.ColRoots(), loaded.ColRoots()) + + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + loadedRowRoots, err := loaded.RowRoots() + require.NoError(t, err) + require.Equal(t, rowRoots, loadedRowRoots) + + colRoots, err := eds.ColRoots() + require.NoError(t, err) + loadedColRoots, err := loaded.ColRoots() + require.NoError(t, err) + require.Equal(t, colRoots, loadedColRoots) } func TestReadEDS(t *testing.T) { @@ -187,17 +169,22 @@ func TestReadEDS(t *testing.T) { loaded, err := ReadEDS(context.Background(), f, dah.Hash()) require.NoError(t, err, "error reading EDS from file") - require.Equal(t, dah.RowRoots, loaded.RowRoots()) - require.Equal(t, dah.ColumnRoots, loaded.ColRoots()) + rowRoots, err := loaded.RowRoots() + require.NoError(t, err) + require.Equal(t, dah.RowRoots, rowRoots) + colRoots, err := loaded.ColRoots() + require.NoError(t, 
err) + require.Equal(t, dah.ColumnRoots, colRoots) } func TestReadEDSContentIntegrityMismatch(t *testing.T) { writeRandomEDS(t) - dah := da.NewDataAvailabilityHeader(share.RandEDS(t, 4)) + dah, err := da.NewDataAvailabilityHeader(edstest.RandEDS(t, 4)) + require.NoError(t, err) f := openWrittenEDS(t) defer f.Close() - _, err := ReadEDS(context.Background(), f, dah.Hash()) + _, err = ReadEDS(context.Background(), f, dah.Hash()) require.ErrorContains(t, err, "share: content integrity mismatch: imported root") } @@ -208,8 +195,9 @@ func BenchmarkReadWriteEDS(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) b.Cleanup(cancel) for originalDataWidth := 4; originalDataWidth <= 64; originalDataWidth *= 2 { - eds := share.RandEDS(b, originalDataWidth) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(b, originalDataWidth) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) b.Run(fmt.Sprintf("Writing %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { @@ -242,7 +230,7 @@ func writeRandomEDS(t *testing.T) *rsmt2d.ExtendedDataSquare { f, err := os.OpenFile("test.car", os.O_WRONLY|os.O_CREATE, 0600) require.NoError(t, err, "error opening file") - eds := share.RandEDS(t, 4) + eds := edstest.RandEDS(t, 4) err = WriteEDS(ctx, eds, f) require.NoError(t, err, "error writing EDS to file") f.Close() @@ -276,11 +264,12 @@ func createTestData(t *testing.T, testDir string) { //nolint:unused f, err := os.OpenFile("example.car", os.O_WRONLY|os.O_CREATE, 0600) require.NoError(t, err, "opening file") - eds := share.RandEDS(t, 4) + eds := edstest.RandEDS(t, 4) err = WriteEDS(ctx, eds, f) require.NoError(t, err, "writing EDS to file") f.Close() - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) header, err := json.MarshalIndent(dah, "", "") require.NoError(t, err, "marshaling example root") diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go new file mode 100644 index 0000000000..ddca285f0c --- /dev/null +++ b/share/eds/edstest/testing.go @@ -0,0 +1,45 @@ +package edstest + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func RandByzantineEDS(t *testing.T, size int) *rsmt2d.ExtendedDataSquare { + eds := RandEDS(t, size) + shares := eds.Flattened() + copy(share.GetData(shares[0]), share.GetData(shares[1])) // corrupting eds + eds, err := rsmt2d.ImportExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + return eds +} + +// RandEDS generates EDS filled with the random data with the given size for original square. It +// uses require.TestingT to be able to take both a *testing.T and a *testing.B. 
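A hypothetical test (not part of this change set) showing how the new edstest helpers compose with the error-returning DAH constructor:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-app/pkg/da"

	"github.com/celestiaorg/celestia-node/share/eds/edstest"
)

func TestRandEDSRoundtrip(t *testing.T) {
	// 4x4 original square -> 8x8 extended square
	eds := edstest.RandEDS(t, 4)

	dah, err := da.NewDataAvailabilityHeader(eds)
	require.NoError(t, err)

	// the DAH carries one root per extended row and column
	require.Len(t, dah.RowRoots, 8)
	require.Len(t, dah.ColumnRoots, 8)
}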
+func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { + shares := sharetest.RandShares(t, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + return eds +} + +func RandEDSWithNamespace( + t require.TestingT, + namespace share.Namespace, + size int, +) (*rsmt2d.ExtendedDataSquare, da.DataAvailabilityHeader) { + shares := sharetest.RandSharesWithNamespace(t, namespace, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + return eds, dah +} diff --git a/share/eds/inverted_index.go b/share/eds/inverted_index.go index f917619676..8b9dcb5d95 100644 --- a/share/eds/inverted_index.go +++ b/share/eds/inverted_index.go @@ -2,16 +2,22 @@ package eds import ( "context" - "encoding/json" + "errors" "fmt" "github.com/filecoin-project/dagstore/index" "github.com/filecoin-project/dagstore/shard" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" "github.com/multiformats/go-multihash" + + dsbadger "github.com/celestiaorg/go-ds-badger4" ) +const invertedIndexPath = "/inverted_index/" + +// ErrNotFoundInIndex is returned instead of ErrNotFound if the multihash doesn't exist in the index +var ErrNotFoundInIndex = fmt.Errorf("does not exist in index") + // simpleInvertedIndex is an inverted index that only stores a single shard key per multihash. Its // implementation is modified from the default upstream implementation in dagstore/index. type simpleInvertedIndex struct { @@ -21,10 +27,23 @@ type simpleInvertedIndex struct { // newSimpleInvertedIndex returns a new inverted index that only stores a single shard key per // multihash. This is because we use badger as a storage backend, so updates are expensive, and we // don't care which shard is used to serve a cid. -func newSimpleInvertedIndex(dts ds.Batching) *simpleInvertedIndex { - return &simpleInvertedIndex{ - ds: namespace.Wrap(dts, ds.NewKey("/inverted/index")), +func newSimpleInvertedIndex(storePath string) (*simpleInvertedIndex, error) { + opts := dsbadger.DefaultOptions // this should be copied + // turn off value log GC + opts.GcInterval = 0 + // 20 compactors have shown no hangups on put operations up to 40k blocks with eds size 128.
+ opts.NumCompactors = 20 + // use minimum amount of NumLevelZeroTables to trigger L0 compaction faster + opts.NumLevelZeroTables = 1 + // MaxLevels = 8 will allow the db to grow to ~11.1 TiB + opts.MaxLevels = 8 + + ds, err := dsbadger.NewDatastore(storePath+invertedIndexPath, &opts) + if err != nil { + return nil, fmt.Errorf("can't open Badger Datastore: %w", err) } + + return &simpleInvertedIndex{ds: ds}, nil } func (s *simpleInvertedIndex) AddMultihashesForShard( @@ -40,35 +59,20 @@ func (s *simpleInvertedIndex) AddMultihashesForShard( return fmt.Errorf("failed to create ds batch: %w", err) } - if err := mhIter.ForEach(func(mh multihash.Multihash) error { + err = mhIter.ForEach(func(mh multihash.Multihash) error { key := ds.NewKey(string(mh)) - ok, err := s.ds.Has(ctx, key) - if err != nil { - return fmt.Errorf("failed to check if value for multihash exists %s, err: %w", mh, err) - } - - if !ok { - bz, err := json.Marshal(sk) - if err != nil { - return fmt.Errorf("failed to marshal shard key to bytes: %w", err) - } - if err := batch.Put(ctx, key, bz); err != nil { - return fmt.Errorf("failed to put mh=%s, err=%w", mh, err) - } + if err := batch.Put(ctx, key, []byte(sk.String())); err != nil { + return fmt.Errorf("failed to put mh=%s, err=%w", mh, err) } return nil - }); err != nil { + }) + if err != nil { return fmt.Errorf("failed to add index entry: %w", err) } if err := batch.Commit(ctx); err != nil { return fmt.Errorf("failed to commit batch: %w", err) } - - if err := s.ds.Sync(ctx, ds.Key{}); err != nil { - return fmt.Errorf("failed to sync puts: %w", err) - } return nil } @@ -76,13 +80,12 @@ func (s *simpleInvertedIndex) GetShardsForMultihash(ctx context.Context, mh mult key := ds.NewKey(string(mh)) sbz, err := s.ds.Get(ctx, key) if err != nil { - return nil, fmt.Errorf("failed to lookup index for mh %s, err: %w", mh, err) + return nil, errors.Join(ErrNotFoundInIndex, err) } - var shardKey shard.Key - if err := json.Unmarshal(sbz, &shardKey); err != nil { - return nil, fmt.Errorf("failed to unmarshal shard key for mh=%s, err=%w", mh, err) - } + return []shard.Key{shard.KeyFromString(string(sbz))}, nil +} - return []shard.Key{shardKey}, nil +func (s *simpleInvertedIndex) close() error { + return s.ds.Close() } diff --git a/share/eds/inverted_index_test.go b/share/eds/inverted_index_test.go index f228aa0d92..e83c2be267 100644 --- a/share/eds/inverted_index_test.go +++ b/share/eds/inverted_index_test.go @@ -5,8 +5,6 @@ import ( "testing" "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" ) @@ -37,20 +35,21 @@ func TestMultihashesForShard(t *testing.T) { } mi := &mockIterator{mhs: mhs} - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - invertedIndex := newSimpleInvertedIndex(ds) + path := t.TempDir() + invertedIndex, err := newSimpleInvertedIndex(path) + require.NoError(t, err) // 1. Add all 3 multihashes to shard1 - err := invertedIndex.AddMultihashesForShard(ctx, mi, shard.KeyFromString("shard1")) + err = invertedIndex.AddMultihashesForShard(ctx, mi, shard.KeyFromString("shard1")) require.NoError(t, err) shardKeys, err := invertedIndex.GetShardsForMultihash(ctx, mhs[0]) require.NoError(t, err) require.Equal(t, []shard.Key{shard.KeyFromString("shard1")}, shardKeys) - // 2. Add mh1 to shard2, and ensure that mh1 still points to shard1 + // 2.
Add mh1 to shard2, and ensure that mh1 no longer points to shard1 err = invertedIndex.AddMultihashesForShard(ctx, &mockIterator{mhs: mhs[:1]}, shard.KeyFromString("shard2")) require.NoError(t, err) shardKeys, err = invertedIndex.GetShardsForMultihash(ctx, mhs[0]) require.NoError(t, err) - require.Equal(t, []shard.Key{shard.KeyFromString("shard1")}, shardKeys) + require.Equal(t, []shard.Key{shard.KeyFromString("shard2")}, shardKeys) } diff --git a/share/eds/metrics.go b/share/eds/metrics.go new file mode 100644 index 0000000000..1f430bf688 --- /dev/null +++ b/share/eds/metrics.go @@ -0,0 +1,294 @@ +package eds + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +const ( + failedKey = "failed" + sizeKey = "eds_size" + cachedKey = "cached" + + putResultKey = "result" + putOK putResult = "ok" + putExists putResult = "exists" + putFailed putResult = "failed" + + opNameKey = "op" + longOpResultKey = "result" + longOpUnresolved longOpResult = "unresolved" + longOpOK longOpResult = "ok" + longOpFailed longOpResult = "failed" + + dagstoreShardStatusKey = "shard_status" +) + +var ( + meter = otel.Meter("eds_store") +) + +type putResult string + +type longOpResult string + +type metrics struct { + putTime metric.Float64Histogram + getCARTime metric.Float64Histogram + getCARBlockstoreTime metric.Float64Histogram + getDAHTime metric.Float64Histogram + removeTime metric.Float64Histogram + getTime metric.Float64Histogram + hasTime metric.Float64Histogram + listTime metric.Float64Histogram + getAccessorTime metric.Float64Histogram + + longOpTime metric.Float64Histogram + gcTime metric.Float64Histogram +} + +func (s *Store) WithMetrics() error { + putTime, err := meter.Float64Histogram("eds_store_put_time_histogram", + metric.WithDescription("eds store put time histogram(s)")) + if err != nil { + return err + } + + getCARTime, err := meter.Float64Histogram("eds_store_get_car_time_histogram", + metric.WithDescription("eds store get car time histogram(s)")) + if err != nil { + return err + } + + getCARBlockstoreTime, err := meter.Float64Histogram("eds_store_get_car_blockstore_time_histogram", + metric.WithDescription("eds store get car blockstore time histogram(s)")) + if err != nil { + return err + } + + getDAHTime, err := meter.Float64Histogram("eds_store_get_dah_time_histogram", + metric.WithDescription("eds store get dah time histogram(s)")) + if err != nil { + return err + } + + removeTime, err := meter.Float64Histogram("eds_store_remove_time_histogram", + metric.WithDescription("eds store remove time histogram(s)")) + if err != nil { + return err + } + + getTime, err := meter.Float64Histogram("eds_store_get_time_histogram", + metric.WithDescription("eds store get time histogram(s)")) + if err != nil { + return err + } + + hasTime, err := meter.Float64Histogram("eds_store_has_time_histogram", + metric.WithDescription("eds store has time histogram(s)")) + if err != nil { + return err + } + + listTime, err := meter.Float64Histogram("eds_store_list_time_histogram", + metric.WithDescription("eds store list time histogram(s)")) + if err != nil { + return err + } + + getAccessorTime, err := meter.Float64Histogram("eds_store_get_accessor_time_histogram", + metric.WithDescription("eds store get accessor time histogram(s)")) + if err != nil { + return err + } + + longOpTime, err := meter.Float64Histogram("eds_store_long_operation_time_histogram", + metric.WithDescription("eds store long operation time histogram(s)")) + if 
err != nil { + return err + } + + gcTime, err := meter.Float64Histogram("eds_store_gc_time", + metric.WithDescription("dagstore gc time histogram(s)")) + if err != nil { + return err + } + + dagStoreShards, err := meter.Int64ObservableGauge("eds_store_dagstore_shards", + metric.WithDescription("dagstore amount of shards by status")) + if err != nil { + return err + } + + if err = s.cache.withMetrics(); err != nil { + return err + } + + callback := func(ctx context.Context, observer metric.Observer) error { + stats := s.dgstr.Stats() + for status, amount := range stats { + observer.ObserveInt64(dagStoreShards, int64(amount), + metric.WithAttributes( + attribute.String(dagstoreShardStatusKey, status.String()), + )) + } + return nil + } + + if _, err := meter.RegisterCallback(callback, dagStoreShards); err != nil { + return err + } + + s.metrics = &metrics{ + putTime: putTime, + getCARTime: getCARTime, + getCARBlockstoreTime: getCARBlockstoreTime, + getDAHTime: getDAHTime, + removeTime: removeTime, + getTime: getTime, + hasTime: hasTime, + listTime: listTime, + getAccessorTime: getAccessorTime, + longOpTime: longOpTime, + gcTime: gcTime, + } + return nil +} + +func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + m.gcTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putResult, size uint) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.putTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.String(putResultKey, string(result)), + attribute.Int(sizeKey, int(size)))) +} + +func (m *metrics) observeLongOp(ctx context.Context, opName string, dur time.Duration, result longOpResult) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.longOpTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.String(opNameKey, opName), + attribute.String(longOpResultKey, string(result)))) +} + +func (m *metrics) observeGetCAR(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.getCARTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeCARBlockstore(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.getCARBlockstoreTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeGetDAH(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.getDAHTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeRemove(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.removeTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.getTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + 
attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.hasTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeList(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.listTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeGetAccessor(ctx context.Context, dur time.Duration, cached, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.getAccessorTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(cachedKey, cached), + attribute.Bool(failedKey, failed))) +} diff --git a/share/eds/ods_test.go b/share/eds/ods_test.go index 75fad3022e..5b6ed5568b 100644 --- a/share/eds/ods_test.go +++ b/share/eds/ods_test.go @@ -52,7 +52,7 @@ func TestODSReader(t *testing.T) { assert.NoError(t, err) // check that original data from eds is same as data from reader - assert.Equal(t, original, block.RawData()[share.NamespaceSize:]) + assert.Equal(t, original, share.GetData(block.RawData())) } } @@ -89,6 +89,16 @@ func TestODSReaderReconstruction(t *testing.T) { // reconstruct EDS from ODSReader loaded, err := ReadEDS(ctx, odsR, dah.Hash()) assert.NoError(t, err) - require.Equal(t, eds.RowRoots(), loaded.RowRoots()) - require.Equal(t, eds.ColRoots(), loaded.ColRoots()) + + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + loadedRowRoots, err := loaded.RowRoots() + require.NoError(t, err) + require.Equal(t, rowRoots, loadedRowRoots) + + colRoots, err := eds.ColRoots() + require.NoError(t, err) + loadedColRoots, err := loaded.ColRoots() + require.NoError(t, err) + require.Equal(t, colRoots, loadedColRoots) } diff --git a/share/eds/retriever.go b/share/eds/retriever.go index b2dcc4ff7a..e7837ae23a 100644 --- a/share/eds/retriever.go +++ b/share/eds/retriever.go @@ -17,6 +17,7 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -64,7 +65,6 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader defer span.End() span.SetAttributes( attribute.Int("size", len(dah.RowRoots)), - attribute.String("data_hash", dah.String()), ) log.Debugw("retrieving data square", "data_hash", dah.String(), "size", len(dah.RowRoots)) @@ -122,12 +122,20 @@ type retrievalSession struct { // newSession creates a new retrieval session and kicks off requesting process. func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHeader) (*retrievalSession, error) { size := len(dah.RowRoots) + treeFn := func(_ rsmt2d.Axis, index uint) rsmt2d.Tree { - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, index) + // use proofs adder if provided, to cache collected proofs while recomputing the eds + var opts []nmt.Option + visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() + if visitor != nil { + opts = append(opts, nmt.NodeVisitor(visitor)) + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, index, opts...) 
return &tree } - square, err := rsmt2d.ImportExtendedDataSquare(make([][]byte, size*size), share.DefaultRSMT2DCodec(), treeFn) + square, err := rsmt2d.NewExtendedDataSquare(share.DefaultRSMT2DCodec(), treeFn, uint(size), share.Size) if err != nil { return nil, err } @@ -145,6 +153,7 @@ func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHead for i := range ses.squareCellsLks { ses.squareCellsLks[i] = make([]sync.Mutex, size) } + go ses.request(ctx) return ses, nil } @@ -248,7 +257,6 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { nd, err := ipld.GetNode(ctx, rs.bget, root) if err != nil { rs.span.RecordError(err, trace.WithAttributes( - attribute.String("requesting-root", root.String()), attribute.Int("root-index", i), )) return @@ -256,7 +264,7 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { // and go get shares of left or the right side of the whole col/row axis // the left or the right side of the tree represent some portion of the quadrant // which we put into the rs.square share-by-share by calculating shares' indexes using q.index - share.GetShares(ctx, rs.bget, nd.Links()[q.x].Cid, size, func(j int, share share.Share) { + ipld.GetShares(ctx, rs.bget, nd.Links()[q.x].Cid, size, func(j int, share share.Share) { // NOTE: Each share can appear twice here, for a Row and Col, respectively. // These shares are always equal, and we allow only the first one to be written // in the square. @@ -283,10 +291,12 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { if rs.isReconstructed() { return } - if rs.square.GetCell(uint(x), uint(y)) != nil { + if err := rs.square.SetCell(uint(x), uint(y), share); err != nil { + // safe to ignore as: + // * share size already verified + // * the same share might come from either Row or Col return } - rs.square.SetCell(uint(x), uint(y), share) // if we have >= 1/4 of the square we can start trying to Reconstruct // TODO(@Wondertan): This is not an ideal way to know when to start // reconstruction and can cause idle reconstruction tries in some cases, diff --git a/share/eds/retriever_test.go b/share/eds/retriever_test.go index e90216de13..12b1c11083 100644 --- a/share/eds/retriever_test.go +++ b/share/eds/retriever_test.go @@ -3,6 +3,7 @@ package eds import ( "context" "errors" + "fmt" "testing" "time" @@ -20,7 +21,9 @@ import ( "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestRetriever_Retrieve(t *testing.T) { @@ -48,18 +51,19 @@ func TestRetriever_Retrieve(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { // generate EDS - shares := share.RandShares(t, tc.squareSize*tc.squareSize) - in, err := share.AddShares(ctx, shares, bServ) + shares := sharetest.RandShares(t, tc.squareSize*tc.squareSize) + in, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) // limit with timeout, specifically retrieval ctx, cancel := context.WithTimeout(ctx, time.Minute*5) // the timeout is big for the max size which is long defer cancel() - dah := da.NewDataAvailabilityHeader(in) + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) out, err := r.Retrieve(ctx, &dah) require.NoError(t, err) - assert.True(t, share.EqualEDS(in, out)) + assert.True(t, 
in.Equals(out)) }) } } @@ -70,8 +74,8 @@ func TestRetriever_ByzantineError(t *testing.T) { defer cancel() bserv := mdutils.Bserv() - shares := share.ExtractEDS(share.RandEDS(t, width)) - _, err := share.ImportShares(ctx, shares, bserv) + shares := edstest.RandEDS(t, width).Flattened() + _, err := ipld.ImportShares(ctx, shares, bserv) require.NoError(t, err) // corrupt shares so that eds erasure coding does not match @@ -90,7 +94,8 @@ func TestRetriever_ByzantineError(t *testing.T) { require.NoError(t, err) // ensure we rcv an error - dah := da.NewDataAvailabilityHeader(attackerEDS) + dah, err := da.NewDataAvailabilityHeader(attackerEDS) + require.NoError(t, err) r := NewRetriever(bserv) _, err = r.Retrieve(ctx, &dah) var errByz *byzantine.ErrByzantine @@ -109,11 +114,12 @@ func TestRetriever_MultipleRandQuadrants(t *testing.T) { r := NewRetriever(bServ) // generate EDS - shares := share.RandShares(t, squareSize*squareSize) - in, err := share.AddShares(ctx, shares, bServ) + shares := sharetest.RandShares(t, squareSize*squareSize) + in, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(in) + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) ses, err := r.newSession(ctx, &dah) require.NoError(t, err) @@ -132,25 +138,114 @@ func TestFraudProofValidation(t *testing.T) { defer t.Cleanup(cancel) bServ := mdutils.Bserv() - var errByz *byzantine.ErrByzantine - faultHeader, err := generateByzantineError(ctx, t, bServ) - require.True(t, errors.As(err, &errByz)) + odsSize := []int{2, 4, 16, 32, 64, 128} + for _, size := range odsSize { + t.Run(fmt.Sprintf("ods size:%d", size), func(t *testing.T) { + var errByz *byzantine.ErrByzantine + faultHeader, err := generateByzantineError(ctx, t, size, bServ) + require.True(t, errors.As(err, &errByz)) - p := byzantine.CreateBadEncodingProof([]byte("hash"), uint64(faultHeader.Height()), errByz) - err = p.Validate(faultHeader) - require.NoError(t, err) + p := byzantine.CreateBadEncodingProof([]byte("hash"), faultHeader.Height(), errByz) + err = p.Validate(faultHeader) + require.NoError(t, err) + }) + } } func generateByzantineError( ctx context.Context, t *testing.T, + odsSize int, bServ blockservice.BlockService, ) (*header.ExtendedHeader, error) { - store := headertest.NewStore(t) - h, err := store.GetByHeight(ctx, 1) + eds := edstest.RandByzantineEDS(t, odsSize) + err := ipld.ImportEDS(ctx, eds, bServ) require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + _, err = NewRetriever(bServ).Retrieve(ctx, h.DAH) + + return h, err +} - faultHeader, _ := headertest.CreateFraudExtHeader(t, h, bServ) - _, err = NewRetriever(bServ).Retrieve(ctx, faultHeader.DAH) - return faultHeader, err +/* +BenchmarkBEFPValidation/ods_size:2 31273 38819 ns/op 68052 B/op 366 allocs/op +BenchmarkBEFPValidation/ods_size:4 14664 80439 ns/op 135892 B/op 894 allocs/op +BenchmarkBEFPValidation/ods_size:16 2850 386178 ns/op 587890 B/op 4945 allocs/op +BenchmarkBEFPValidation/ods_size:32 1399 874490 ns/op 1233399 B/op 11284 allocs/op +BenchmarkBEFPValidation/ods_size:64 619 2047540 ns/op 2578008 B/op 25364 allocs/op +BenchmarkBEFPValidation/ods_size:128 259 4934375 ns/op 5418406 B/op 56345 allocs/op +*/ +func BenchmarkBEFPValidation(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer b.Cleanup(cancel) + bServ := mdutils.Bserv() + r := NewRetriever(bServ) + t := &testing.T{} + odsSize := []int{2, 4, 16, 32, 64, 128} + for _, size := range odsSize { + 
b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { + b.ResetTimer() + b.StopTimer() + eds := edstest.RandByzantineEDS(t, size) + err := ipld.ImportEDS(ctx, eds, bServ) + require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + _, err = r.Retrieve(ctx, h.DAH) + var errByz *byzantine.ErrByzantine + require.ErrorAs(t, err, &errByz) + b.StartTimer() + + for i := 0; i < b.N; i++ { + b.ReportAllocs() + p := byzantine.CreateBadEncodingProof([]byte("hash"), h.Height(), errByz) + err = p.Validate(h) + require.NoError(b, err) + } + }) + } +} + +/* +BenchmarkNewErrByzantineData/ods_size:2 29605 38846 ns/op 49518 B/op 579 allocs/op +BenchmarkNewErrByzantineData/ods_size:4 11380 105302 ns/op 134967 B/op 1571 allocs/op +BenchmarkNewErrByzantineData/ods_size:16 1902 631086 ns/op 830199 B/op 9601 allocs/op +BenchmarkNewErrByzantineData/ods_size:32 756 1530985 ns/op 1985272 B/op 22901 allocs/op +BenchmarkNewErrByzantineData/ods_size:64 340 3445544 ns/op 4767053 B/op 54704 allocs/op +BenchmarkNewErrByzantineData/ods_size:128 132 8740678 ns/op 11991093 B/op 136584 allocs/op +*/ +func BenchmarkNewErrByzantineData(b *testing.B) { + odsSize := []int{2, 4, 16, 32, 64, 128} + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + bServ := mdutils.Bserv() + r := NewRetriever(bServ) + t := &testing.T{} + for _, size := range odsSize { + b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { + b.StopTimer() + eds := edstest.RandByzantineEDS(t, size) + err := ipld.ImportEDS(ctx, eds, bServ) + require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + ses, err := r.newSession(ctx, h.DAH) + require.NoError(t, err) + + select { + case <-ctx.Done(): + b.Fatal(ctx.Err()) + case <-ses.Done(): + } + + _, err = ses.Reconstruct(ctx) + assert.NoError(t, err) + var errByz *rsmt2d.ErrByzantineData + require.ErrorAs(t, err, &errByz) + b.StartTimer() + + for i := 0; i < b.N; i++ { + err = byzantine.NewErrByzantine(ctx, bServ, h.DAH, errByz) + require.NotNil(t, err) + } + }) + } } diff --git a/share/eds/store.go b/share/eds/store.go index f0d02a1141..24a96c9fe4 100644 --- a/share/eds/store.go +++ b/share/eds/store.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "context" + "encoding/hex" "errors" "fmt" "io" @@ -15,8 +16,8 @@ import ( "github.com/filecoin-project/dagstore/index" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/dagstore/shard" + bstore "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" carv1 "github.com/ipld/go-car" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -34,7 +35,10 @@ const ( indexPath = "/index/" transientsPath = "/transients/" - defaultGCInterval = time.Hour + // GC performs DAG store garbage collection by reclaiming transient files of + // shards that are currently available but inactive, or errored. + // We don't use transient files right now, so GC is turned off by default. + defaultGCInterval = 0 ) var ErrNotFound = errors.New("eds not found in store") @@ -52,13 +56,15 @@ type Store struct { cache *blockstoreCache bs bstore.Blockstore - topIdx index.Inverted - carIdx index.FullIndexRepo + carIdx index.FullIndexRepo + invertedIdx *simpleInvertedIndex basepath string gcInterval time.Duration // lastGCResult is only stored on the store for testing purposes. lastGCResult atomic.Pointer[dagstore.GCResult] + + metrics *metrics } // NewStore creates a new EDS Store under the given basepath and datastore. 
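As a rough orientation for the Store changes in this file, here is a hedged lifecycle sketch. The in-memory datastore, the path, and the log.Fatal error handling are illustrative only; WithMetrics is the optional OpenTelemetry instrumentation added in metrics.go above, and Stop now also closes the inverted index.

package main

import (
	"context"
	"log"

	"github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"

	"github.com/celestiaorg/celestia-node/share/eds"
)

func main() {
	ctx := context.Background()

	// an in-memory datastore and a scratch directory, purely for illustration
	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
	store, err := eds.NewStore("/tmp/eds-store", ds)
	if err != nil {
		log.Fatal(err)
	}
	if err := store.WithMetrics(); err != nil { // optional OTel histograms and shard gauge
		log.Fatal(err)
	}
	if err := store.Start(ctx); err != nil {
		log.Fatal(err)
	}
	defer store.Stop(ctx)
}

Note that with defaultGCInterval set to 0 in this diff, Start no longer launches the periodic dagstore GC goroutine unless the interval is overridden.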
@@ -69,7 +75,10 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { } r := mount.NewRegistry() - err = r.Register("fs", &mount.FileMount{Path: basepath + blocksPath}) + err = r.Register("fs", &inMemoryOnceMount{}) + if err != nil { + return nil, fmt.Errorf("failed to register memory mount on the registry: %w", err) + } if err != nil { return nil, fmt.Errorf("failed to register FS mount on the registry: %w", err) } @@ -79,14 +88,17 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { return nil, fmt.Errorf("failed to create index repository: %w", err) } - invertedRepo := newSimpleInvertedIndex(ds) + invertedIdx, err := newSimpleInvertedIndex(basepath) + if err != nil { + return nil, fmt.Errorf("failed to create index: %w", err) + } dagStore, err := dagstore.NewDAGStore( dagstore.Config{ TransientsDir: basepath + transientsPath, IndexRepo: fsRepo, Datastore: ds, MountRegistry: r, - TopLevelIndex: invertedRepo, + TopLevelIndex: invertedIdx, }, ) if err != nil { @@ -99,15 +111,15 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { } store := &Store{ - basepath: basepath, - dgstr: dagStore, - topIdx: invertedRepo, - carIdx: fsRepo, - gcInterval: defaultGCInterval, - mounts: r, - cache: cache, - } - store.bs = newBlockstore(store, cache) + basepath: basepath, + dgstr: dagStore, + carIdx: fsRepo, + invertedIdx: invertedIdx, + gcInterval: defaultGCInterval, + mounts: r, + cache: cache, + } + store.bs = newBlockstore(store, cache, ds) return store, nil } @@ -117,19 +129,25 @@ func (s *Store) Start(ctx context.Context) error { return err } // start Store only if DagStore succeeds - ctx, cancel := context.WithCancel(context.Background()) + runCtx, cancel := context.WithCancel(context.Background()) s.cancel = cancel // initialize empty gc result to avoid panic on access s.lastGCResult.Store(&dagstore.GCResult{ Shards: make(map[shard.Key]error), }) - go s.gc(ctx) + + if s.gcInterval != 0 { + go s.gc(runCtx) + } return nil } // Stop stops the underlying DAGStore. func (s *Store) Stop(context.Context) error { defer s.cancel() + if err := s.invertedIdx.close(); err != nil { + return err + } return s.dgstr.Close() } @@ -141,7 +159,9 @@ func (s *Store) gc(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: + tnow := time.Now() res, err := s.dgstr.GC(ctx) + s.metrics.observeGCtime(ctx, time.Since(tnow), err != nil) if err != nil { log.Errorf("garbage collecting dagstore: %v", err) return @@ -157,23 +177,30 @@ func (s *Store) gc(ctx context.Context) { // The square is verified on the Exchange level, and Put only stores the square, trusting it. // The resulting file stores all the shares and NMT Merkle Proofs of the EDS. // Additionally, the file gets indexed s.t. store.Blockstore can access them. 
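Because put short-circuits with dagstore.ErrShardExists when the root is already indexed (see below) and Put returns that error unchanged, a caller that treats re-puts as benign might look like the following sketch; putOnce and the edsutil package name are hypothetical and not part of this change.

package edsutil

import (
	"context"
	"errors"

	"github.com/filecoin-project/dagstore"

	"github.com/celestiaorg/rsmt2d"

	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/eds"
)

// putOnce stores the square unless it is already present, in which case it is a no-op.
func putOnce(ctx context.Context, store *eds.Store, root share.DataHash, square *rsmt2d.ExtendedDataSquare) error {
	err := store.Put(ctx, root, square)
	if errors.Is(err, dagstore.ErrShardExists) {
		return nil // already stored and indexed
	}
	return err
}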
-func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) (err error) { - // if root already exists, short-circuit - has, err := s.Has(ctx, root) - if err != nil { - return fmt.Errorf("failed to check if root already exists in index: %w", err) - } - if has { - return dagstore.ErrShardExists - } - +func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) error { ctx, span := tracer.Start(ctx, "store/put", trace.WithAttributes( - attribute.String("root", root.String()), attribute.Int("width", int(square.Width())), )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() + + tnow := time.Now() + err := s.put(ctx, root, square) + result := putOK + switch { + case errors.Is(err, dagstore.ErrShardExists): + result = putExists + case err != nil: + result = putFailed + } + utils.SetStatusAndEnd(span, err) + s.metrics.observePut(ctx, time.Since(tnow), result, square.Width()) + return err +} + +func (s *Store) put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) (err error) { + // if root already exists, short-circuit + if has, _ := s.Has(ctx, root); has { + return dagstore.ErrShardExists + } key := root.String() f, err := os.OpenFile(s.basepath+blocksPath+key, os.O_CREATE|os.O_WRONLY, 0600) @@ -182,21 +209,32 @@ } defer f.Close() - err = WriteEDS(ctx, square, f) + // save encoded eds into buffer + mount := &inMemoryOnceMount{ + // TODO: buffer could be pre-allocated with capacity calculated based on eds size. + buf: bytes.NewBuffer(nil), + FileMount: mount.FileMount{Path: s.basepath + blocksPath + key}, + } + err = WriteEDS(ctx, square, mount) if err != nil { return fmt.Errorf("failed to write EDS to file: %w", err) } + // write whole buffered mount data in one go to optimize i/o + if _, err = mount.WriteTo(f); err != nil { + return fmt.Errorf("failed to write EDS to file: %w", err) + } + ch := make(chan dagstore.ShardResult, 1) - err = s.dgstr.RegisterShard(ctx, shard.KeyFromString(key), &mount.FileMount{ - Path: s.basepath + blocksPath + key, - }, ch, dagstore.RegisterOpts{}) + err = s.dgstr.RegisterShard(ctx, shard.KeyFromString(key), mount, ch, dagstore.RegisterOpts{}) if err != nil { return fmt.Errorf("failed to initiate shard registration: %w", err) } select { case <-ctx.Done(): + // if context finished before result was received, track result in separate goroutine + go trackLateResult("put", ch, s.metrics, time.Minute*5) return ctx.Err() case result := <-ch: if result.Error != nil { @@ -206,6 +244,34 @@ func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.Ext } } +// trackLateResult waits for a result from the res channel for a maximum duration specified by +// maxWait. If the result is not received within the specified duration, it logs an error +// indicating that the parent context has expired and the shard registration is stuck. If a result +// is received, it checks for any error and logs appropriate messages.
+func trackLateResult(opName string, res <-chan dagstore.ShardResult, metrics *metrics, maxWait time.Duration) { + tnow := time.Now() + select { + case <-time.After(maxWait): + metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpUnresolved) + log.Errorf("parent context is expired, while register shard is stuck for more than %v sec", time.Since(tnow)) + return + case result := <-res: + // don't observe if result was received right after launch of the func + if time.Since(tnow) < time.Second { + return + } + if result.Error != nil { + metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpFailed) + log.Errorf("failed to register shard after context expired: %v ago, err: %w", time.Since(tnow), result.Error) + return + } + metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpOK) + log.Warnf("parent context expired, but register shard finished with no error,"+ + " after context expired: %v ago", time.Since(tnow)) + return + } +} + // GetCAR takes a DataRoot and returns a buffered reader to the respective EDS serialized as a // CARv1 file. // The Reader strictly reads the CAR header and first quadrant (1/4) of the EDS, omitting all the @@ -214,9 +280,15 @@ func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.Ext // The shard is cached in the Store, so subsequent calls to GetCAR with the same root will use the // same reader. The cache is responsible for closing the underlying reader. func (s *Store) GetCAR(ctx context.Context, root share.DataHash) (io.Reader, error) { - ctx, span := tracer.Start(ctx, "store/get-car", trace.WithAttributes(attribute.String("root", root.String()))) - defer span.End() + ctx, span := tracer.Start(ctx, "store/get-car") + tnow := time.Now() + r, err := s.getCAR(ctx, root) + s.metrics.observeGetCAR(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return r, err +} +func (s *Store) getCAR(ctx context.Context, root share.DataHash) (io.Reader, error) { key := root.String() accessor, err := s.getCachedAccessor(ctx, shard.KeyFromString(key)) if err != nil { @@ -240,6 +312,18 @@ func (s *Store) Blockstore() bstore.Blockstore { func (s *Store) CARBlockstore( ctx context.Context, root share.DataHash, +) (dagstore.ReadBlockstore, error) { + ctx, span := tracer.Start(ctx, "store/car-blockstore") + tnow := time.Now() + r, err := s.carBlockstore(ctx, root) + s.metrics.observeCARBlockstore(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return r, err +} + +func (s *Store) carBlockstore( + ctx context.Context, + root share.DataHash, ) (dagstore.ReadBlockstore, error) { key := shard.KeyFromString(root.String()) accessor, err := s.getCachedAccessor(ctx, key) @@ -251,9 +335,15 @@ func (s *Store) CARBlockstore( // GetDAH returns the DataAvailabilityHeader for the EDS identified by DataHash. 
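To make the GetCAR contract above concrete, here is a hedged sketch of streaming a stored EDS back out with the go-car v1 reader already used by the tests in this diff; dumpCAR and its package name are illustrative only.

package edsutil

import (
	"context"
	"errors"
	"io"

	carv1 "github.com/ipld/go-car"

	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/eds"
)

// dumpCAR reads the CARv1 stream for root and walks its blocks until EOF.
func dumpCAR(ctx context.Context, store *eds.Store, root share.DataHash) error {
	r, err := store.GetCAR(ctx, root)
	if err != nil {
		return err
	}
	cr, err := carv1.NewCarReader(r)
	if err != nil {
		return err
	}
	for {
		block, err := cr.Next()
		if errors.Is(err, io.EOF) {
			return nil // header and first quadrant fully consumed
		}
		if err != nil {
			return err
		}
		_ = block.Cid() // e.g. index or inspect the block here
	}
}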
func (s *Store) GetDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { - ctx, span := tracer.Start(ctx, "store/get-dah", trace.WithAttributes(attribute.String("root", root.String()))) - defer span.End() + ctx, span := tracer.Start(ctx, "store/car-dah") + tnow := time.Now() + r, err := s.getDAH(ctx, root) + s.metrics.observeGetDAH(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return r, err +} +func (s *Store) getDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { key := shard.KeyFromString(root.String()) accessor, err := s.getCachedAccessor(ctx, key) if err != nil { @@ -302,6 +392,7 @@ func (s *Store) getAccessor(ctx context.Context, key shard.Key) (*dagstore.Shard } return res.Accessor, nil case <-ctx.Done(): + go trackLateResult("get_shard", ch, s.metrics, time.Minute) return nil, ctx.Err() } } @@ -311,30 +402,40 @@ func (s *Store) getCachedAccessor(ctx context.Context, key shard.Key) (*accessor lk.Lock() defer lk.Unlock() + tnow := time.Now() accessor, err := s.cache.unsafeGet(key) if err != nil && err != errCacheMiss { log.Errorf("unexpected error while reading key from bs cache %s: %s", key, err) } if accessor != nil { + s.metrics.observeGetAccessor(ctx, time.Since(tnow), true, false) return accessor, nil } // wasn't found in cache, so acquire it and add to cache shardAccessor, err := s.getAccessor(ctx, key) if err != nil { + s.metrics.observeGetAccessor(ctx, time.Since(tnow), false, err != nil) return nil, err } - return s.cache.unsafeAdd(key, shardAccessor) + + a, err := s.cache.unsafeAdd(key, shardAccessor) + s.metrics.observeGetAccessor(ctx, time.Since(tnow), false, err != nil) + return a, err } // Remove removes EDS from Store by the given share.Root hash and cleans up all // the indexing. -func (s *Store) Remove(ctx context.Context, root share.DataHash) (err error) { - ctx, span := tracer.Start(ctx, "store/remove", trace.WithAttributes(attribute.String("root", root.String()))) - defer func() { - utils.SetStatusAndEnd(span, err) - }() +func (s *Store) Remove(ctx context.Context, root share.DataHash) error { + ctx, span := tracer.Start(ctx, "store/remove") + tnow := time.Now() + err := s.remove(ctx, root) + s.metrics.observeRemove(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return err +} +func (s *Store) remove(ctx context.Context, root share.DataHash) (err error) { key := root.String() ch := make(chan dagstore.ShardResult, 1) err = s.dgstr.DestroyShard(ctx, shard.KeyFromString(key), ch, dagstore.DestroyOpts{}) @@ -348,6 +449,7 @@ func (s *Store) Remove(ctx context.Context, root share.DataHash) (err error) { return fmt.Errorf("failed to destroy shard: %w", result.Error) } case <-ctx.Done(): + go trackLateResult("remove", ch, s.metrics, time.Minute) return ctx.Err() } @@ -370,8 +472,17 @@ func (s *Store) Remove(ctx context.Context, root share.DataHash) (err error) { // // It reads only one quadrant(1/4) of the EDS and verifies the integrity of the stored data by // recomputing it. 
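The instrumentation added throughout store.go follows one shape: an exported method wraps an unexported implementation, times it, and records the outcome on a nil-safe metrics receiver, so telemetry stays disabled until WithMetrics is called. Below is a condensed, self-contained sketch of that shape; exampleMetrics, observeWork, and doWork are illustrative names, not part of the codebase.

package edsutil

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

type exampleMetrics struct {
	workTime metric.Float64Histogram
}

func newExampleMetrics() (*exampleMetrics, error) {
	workTime, err := otel.Meter("eds_store").Float64Histogram("example_work_time_histogram",
		metric.WithDescription("example work time histogram(s)"))
	if err != nil {
		return nil, err
	}
	return &exampleMetrics{workTime: workTime}, nil
}

// observeWork is safe to call on a nil receiver, mirroring the observe* helpers above.
func (m *exampleMetrics) observeWork(ctx context.Context, dur time.Duration, failed bool) {
	if m == nil {
		return
	}
	m.workTime.Record(ctx, dur.Seconds(), metric.WithAttributes(attribute.Bool("failed", failed)))
}

// doWork shows the exported-wrapper shape: time the inner call, record, return its error.
func doWork(ctx context.Context, m *exampleMetrics, inner func(context.Context) error) error {
	tnow := time.Now()
	err := inner(ctx)
	m.observeWork(ctx, time.Since(tnow), err != nil)
	return err
}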
-func (s *Store) Get(ctx context.Context, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "store/get", trace.WithAttributes(attribute.String("root", root.String()))) +func (s *Store) Get(ctx context.Context, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { + ctx, span := tracer.Start(ctx, "store/get") + tnow := time.Now() + eds, err := s.get(ctx, root) + s.metrics.observeGet(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return eds, err +} + +func (s *Store) get(ctx context.Context, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { + ctx, span := tracer.Start(ctx, "store/get") defer func() { utils.SetStatusAndEnd(span, err) }() @@ -388,10 +499,16 @@ func (s *Store) Get(ctx context.Context, root share.DataHash) (eds *rsmt2d.Exten } // Has checks if EDS exists by the given share.Root hash. -func (s *Store) Has(ctx context.Context, root share.DataHash) (bool, error) { - _, span := tracer.Start(ctx, "store/has", trace.WithAttributes(attribute.String("root", root.String()))) - defer span.End() +func (s *Store) Has(ctx context.Context, root share.DataHash) (has bool, err error) { + ctx, span := tracer.Start(ctx, "store/has") + tnow := time.Now() + eds, err := s.has(ctx, root) + s.metrics.observeHas(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return eds, err +} +func (s *Store) has(_ context.Context, root share.DataHash) (bool, error) { key := root.String() info, err := s.dgstr.GetShardInfo(shard.KeyFromString(key)) switch err { @@ -404,6 +521,29 @@ func (s *Store) Has(ctx context.Context, root share.DataHash) (bool, error) { } } +// List lists all the registered EDSes. +func (s *Store) List() ([]share.DataHash, error) { + ctx, span := tracer.Start(context.Background(), "store/list") + tnow := time.Now() + hashes, err := s.list() + s.metrics.observeList(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return hashes, err +} + +func (s *Store) list() ([]share.DataHash, error) { + shards := s.dgstr.AllShardsInfo() + hashes := make([]share.DataHash, 0, len(shards)) + for shrd := range shards { + hash, err := hex.DecodeString(shrd.String()) + if err != nil { + return nil, err + } + hashes = append(hashes, hash) + } + return hashes, nil +} + func setupPath(basepath string) error { err := os.MkdirAll(basepath+blocksPath, os.ModePerm) if err != nil { @@ -419,3 +559,39 @@ func setupPath(basepath string) error { } return nil } + +// inMemoryOnceMount is used to allow reading once from buffer before using main mount.Reader +type inMemoryOnceMount struct { + buf *bytes.Buffer + + readOnce atomic.Bool + mount.FileMount +} + +func (m *inMemoryOnceMount) Fetch(ctx context.Context) (mount.Reader, error) { + if m.buf != nil && !m.readOnce.Swap(true) { + reader := &inMemoryReader{Reader: bytes.NewReader(m.buf.Bytes())} + // release memory for gc, otherwise buffer will stick forever + m.buf = nil + return reader, nil + } + return m.FileMount.Fetch(ctx) +} + +func (m *inMemoryOnceMount) Write(b []byte) (int, error) { + return m.buf.Write(b) +} + +func (m *inMemoryOnceMount) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, bytes.NewReader(m.buf.Bytes())) +} + +// inMemoryReader extends bytes.Reader to implement mount.Reader interface +type inMemoryReader struct { + *bytes.Reader +} + +// Close allows inMemoryReader to satisfy mount.Reader interface +func (r *inMemoryReader) Close() error { + return nil +} diff --git a/share/eds/store_test.go 
b/share/eds/store_test.go index 6c6d7d10b2..4b263e7062 100644 --- a/share/eds/store_test.go +++ b/share/eds/store_test.go @@ -17,6 +17,7 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" ) func TestEDSStore(t *testing.T) { @@ -77,7 +78,7 @@ func TestEDSStore(t *testing.T) { original := eds.GetCell(uint(i), uint(j)) block, err := carReader.Next() assert.NoError(t, err) - assert.Equal(t, original, block.RawData()[share.NamespaceSize:]) + assert.Equal(t, original, share.GetData(block.RawData())) } } }) @@ -153,6 +154,23 @@ func TestEDSStore(t *testing.T) { _, err = edsStore.cache.Get(shardKey) assert.NoError(t, err, errCacheMiss) }) + + t.Run("List", func(t *testing.T) { + const amount = 10 + hashes := make([]share.DataHash, 0, amount) + for range make([]byte, amount) { + eds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + hashes = append(hashes, dah.Hash()) + } + + hashesOut, err := edsStore.List() + require.NoError(t, err) + for _, hash := range hashes { + assert.Contains(t, hashesOut, hash) + } + }) } // TestEDSStore_GC verifies that unused transient shards are collected by the GC periodically. @@ -252,6 +270,51 @@ func Test_CachedAccessor(t *testing.T) { assert.Equal(t, firstBlock, secondBlock) } +func BenchmarkStore(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + b.Cleanup(cancel) + + tmpDir := b.TempDir() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + edsStore, err := NewStore(tmpDir, ds) + require.NoError(b, err) + err = edsStore.Start(ctx) + require.NoError(b, err) + + // BenchmarkStore/bench_put_128-10 10 3231859283 ns/op (~3sec) + b.Run("bench put 128", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + // pause the timer for initializing test data + b.StopTimer() + eds := edstest.RandEDS(b, 128) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + b.StartTimer() + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(b, err) + } + }) + + // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) + b.Run("bench read 128", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + // pause the timer for initializing test data + b.StopTimer() + eds := edstest.RandEDS(b, 128) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + _ = edsStore.Put(ctx, dah.Hash(), eds) + b.StartTimer() + + _, err = edsStore.Get(ctx, dah.Hash()) + require.NoError(b, err) + } + }) +} + func newStore(t *testing.T) (*Store, error) { t.Helper() @@ -261,8 +324,9 @@ func newStore(t *testing.T) (*Store, error) { } func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, share.Root) { - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) return eds, dah } diff --git a/share/empty.go b/share/empty.go index 0b7ea2e775..07d48f2f07 100644 --- a/share/empty.go +++ b/share/empty.go @@ -2,10 +2,8 @@ package share import ( "bytes" - "context" "fmt" - - "github.com/ipfs/go-blockservice" + "sync" "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" @@ -13,53 +11,60 @@ import ( "github.com/celestiaorg/rsmt2d" ) +// EmptyRoot returns Root of the empty block EDS. 
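A small consumption sketch for the lazily initialized empty-block accessors introduced below; the helper is illustrative. Each accessor calls initEmpty on first use, so all three values are built at most once.

package edsutil

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/share"
)

// printEmptyBlockInfo touches all three lazily initialized empty-block values.
func printEmptyBlockInfo() {
	root := share.EmptyRoot()
	square := share.EmptyExtendedDataSquare()
	shrs := share.EmptyBlockShares()

	fmt.Printf("empty DAH hash: %X, EDS width: %d, shares: %d\n",
		root.Hash(), square.Width(), len(shrs))
}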
+func EmptyRoot() *Root { + initEmpty() + return emptyBlockRoot +} + +// EmptyExtendedDataSquare returns the EDS of the empty block data square. +func EmptyExtendedDataSquare() *rsmt2d.ExtendedDataSquare { + initEmpty() + return emptyBlockEDS +} + +// EmptyBlockShares returns the shares of the empty block. +func EmptyBlockShares() []Share { + initEmpty() + return emptyBlockShares +} + var ( - emptyRoot *Root - emptyEDS *rsmt2d.ExtendedDataSquare + emptyMu sync.Mutex + emptyBlockRoot *Root + emptyBlockEDS *rsmt2d.ExtendedDataSquare + emptyBlockShares []Share ) -func init() { +// initEmpty enables lazy initialization for constant empty block data. +func initEmpty() { + emptyMu.Lock() + defer emptyMu.Unlock() + if emptyBlockRoot != nil { + return + } + // compute empty block EDS and DAH for it - shares := emptyDataSquare() - eds, err := da.ExtendShares(shares) + result := shares.TailPaddingShares(appconsts.MinShareCount) + emptyBlockShares = shares.ToBytes(result) + + eds, err := da.ExtendShares(emptyBlockShares) if err != nil { panic(fmt.Errorf("failed to create empty EDS: %w", err)) } - emptyEDS = eds + emptyBlockEDS = eds - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + if err != nil { + panic(fmt.Errorf("failed to create empty DAH: %w", err)) + } minDAH := da.MinDataAvailabilityHeader() if !bytes.Equal(minDAH.Hash(), dah.Hash()) { panic(fmt.Sprintf("mismatch in calculated minimum DAH and minimum DAH from celestia-app, "+ "expected %s, got %s", minDAH.String(), dah.String())) } - emptyRoot = &dah + emptyBlockRoot = &dah // precompute Hash, so it's cached internally to avoid potential races - emptyRoot.Hash() -} - -// EmptyRoot returns Root of an empty EDS. -func EmptyRoot() *Root { - return emptyRoot -} - -// EnsureEmptySquareExists checks if the given DAG contains an empty block data square. -// If it does not, it stores an empty block. This optimization exists to prevent -// redundant storing of empty block data so that it is only stored once and returned -// upon request for a block with an empty data square. Ref: header/constructors.go#L56 -func EnsureEmptySquareExists(ctx context.Context, bServ blockservice.BlockService) (*rsmt2d.ExtendedDataSquare, error) { - shares := emptyDataSquare() - return AddShares(ctx, shares, bServ) -} - -// EmptyExtendedDataSquare returns the EDS of the empty block data square. -func EmptyExtendedDataSquare() *rsmt2d.ExtendedDataSquare { - return emptyEDS -} - -// emptyDataSquare returns the minimum size data square filled with tail padding. -func emptyDataSquare() [][]byte { - result := shares.TailPaddingShares(appconsts.MinShareCount) - return shares.ToBytes(result) + emptyBlockRoot.Hash() } diff --git a/share/getter.go b/share/getter.go index f7a7b9c129..18d3873de1 100644 --- a/share/getter.go +++ b/share/getter.go @@ -2,22 +2,20 @@ package share import ( "context" + "crypto/sha256" "errors" "fmt" - "github.com/minio/sha256-simd" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" ) var ( // ErrNotFound is used to indicate that requested data could not be found. ErrNotFound = errors.New("share: data not found") - // ErrNamespaceNotFound is returned by GetSharesByNamespace when data for requested root does - // not include any shares from the given namespace - ErrNamespaceNotFound = errors.New("share: namespace not found in data") + // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the + // square size. 
+ ErrOutOfBounds = errors.New("share: row or column index is larger than square size") ) // Getter interface provides a set of accessors for shares by the Root. @@ -33,7 +31,10 @@ type Getter interface { // GetSharesByNamespace gets all shares from an EDS within the given namespace. // Shares are returned in a row-by-row order if the namespace spans multiple rows. - GetSharesByNamespace(context.Context, *Root, namespace.ID) (NamespacedShares, error) + // Inclusion of returned data could be verified using Verify method on NamespacedShares. + // If no shares are found for target namespace non-inclusion could be also verified by calling + // Verify method. + GetSharesByNamespace(context.Context, *Root, Namespace) (NamespacedShares, error) } // NamespacedShares represents all shares with proofs within a specific namespace of an EDS. @@ -50,15 +51,15 @@ func (ns NamespacedShares) Flatten() []Share { // NamespacedRow represents all shares with proofs within a specific namespace of a single EDS row. type NamespacedRow struct { - Shares []Share - Proof *nmt.Proof + Shares []Share `json:"shares"` + Proof *nmt.Proof `json:"proof"` } // Verify validates NamespacedShares by checking every row with nmt inclusion proof. -func (ns NamespacedShares) Verify(root *Root, nID namespace.ID) error { - originalRoots := make([][]byte, 0) +func (ns NamespacedShares) Verify(root *Root, namespace Namespace) error { + var originalRoots [][]byte for _, row := range root.RowRoots { - if !nID.Less(nmt.MinNamespace(row, nID.Size())) && nID.LessOrEqual(nmt.MaxNamespace(row, nID.Size())) { + if !namespace.IsOutsideRange(row, row) { originalRoots = append(originalRoots, row) } } @@ -70,7 +71,7 @@ func (ns NamespacedShares) Verify(root *Root, nID namespace.ID) error { for i, row := range ns { // verify row data against row hash from original root - if !row.verify(originalRoots[i], nID) { + if !row.verify(originalRoots[i], namespace) { return fmt.Errorf("row verification failed: row %d doesn't match original root: %s", i, root.String()) } } @@ -78,17 +79,18 @@ func (ns NamespacedShares) Verify(root *Root, nID namespace.ID) error { } // verify validates the row using nmt inclusion proof. -func (row *NamespacedRow) verify(rowRoot []byte, nID namespace.ID) bool { +func (row *NamespacedRow) verify(rowRoot []byte, namespace Namespace) bool { // construct nmt leaves from shares by prepending namespace leaves := make([][]byte, 0, len(row.Shares)) - for _, sh := range row.Shares { - leaves = append(leaves, append(sh[:NamespaceSize], sh...)) + for _, shr := range row.Shares { + leaves = append(leaves, append(GetNamespace(shr), shr...)) } // verify namespace return row.Proof.VerifyNamespace( sha256.New(), - nID, + namespace.ToNMT(), leaves, - rowRoot) + rowRoot, + ) } diff --git a/share/getters/cascade.go b/share/getters/cascade.go index 1a0d8fb274..63d7713d3d 100644 --- a/share/getters/cascade.go +++ b/share/getters/cascade.go @@ -2,13 +2,11 @@ package getters import ( "context" - "encoding/hex" "errors" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" @@ -35,12 +33,17 @@ func NewCascadeGetter(getters []share.Getter) *CascadeGetter { // GetShare gets a share from any of registered share.Getters in cascading order. 
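Tying the Getter documentation above together, a hedged sketch of the fetch-and-verify flow for namespaced shares; fetchVerified and its package name are hypothetical, while the Getter, NamespacedShares.Verify, and Flatten calls are those defined in this file.

package edsutil

import (
	"context"

	"github.com/celestiaorg/celestia-node/share"
)

// fetchVerified returns the flattened shares for a namespace only after their
// NMT proofs (including absence proofs for an empty result) check out against root.
func fetchVerified(ctx context.Context, getter share.Getter, root *share.Root, ns share.Namespace) ([]share.Share, error) {
	nshares, err := getter.GetSharesByNamespace(ctx, root, ns)
	if err != nil {
		return nil, err
	}
	if err := nshares.Verify(root, ns); err != nil {
		return nil, err
	}
	return nshares.Flatten(), nil
}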
func (cg *CascadeGetter) GetShare(ctx context.Context, root *share.Root, row, col int) (share.Share, error) { ctx, span := tracer.Start(ctx, "cascade/get-share", trace.WithAttributes( - attribute.String("root", root.String()), attribute.Int("row", row), attribute.Int("col", col), )) defer span.End() + upperBound := len(root.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } get := func(ctx context.Context, get share.Getter) (share.Share, error) { return get.GetShare(ctx, root, row, col) } @@ -50,9 +53,7 @@ func (cg *CascadeGetter) GetShare(ctx context.Context, root *share.Root, row, co // GetEDS gets a full EDS from any of registered share.Getters in cascading order. func (cg *CascadeGetter) GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.ExtendedDataSquare, error) { - ctx, span := tracer.Start(ctx, "cascade/get-eds", trace.WithAttributes( - attribute.String("root", root.String()), - )) + ctx, span := tracer.Start(ctx, "cascade/get-eds") defer span.End() get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) { @@ -67,16 +68,15 @@ func (cg *CascadeGetter) GetEDS(ctx context.Context, root *share.Root) (*rsmt2d. func (cg *CascadeGetter) GetSharesByNamespace( ctx context.Context, root *share.Root, - id namespace.ID, + namespace share.Namespace, ) (share.NamespacedShares, error) { ctx, span := tracer.Start(ctx, "cascade/get-shares-by-namespace", trace.WithAttributes( - attribute.String("root", root.String()), - attribute.String("nid", hex.EncodeToString(id)), + attribute.String("namespace", namespace.String()), )) defer span.End() get := func(ctx context.Context, get share.Getter) (share.NamespacedShares, error) { - return get.GetSharesByNamespace(ctx, root, id) + return get.GetSharesByNamespace(ctx, root, namespace) } return cascadeGetters(ctx, cg.getters, get) @@ -122,8 +122,8 @@ func cascadeGetters[V any]( getCtx, cancel := ctxWithSplitTimeout(ctx, len(getters)-i, 0) val, getErr := get(getCtx, getter) cancel() - if getErr == nil || errors.Is(getErr, share.ErrNamespaceNotFound) { - return val, getErr + if getErr == nil { + return val, nil } if errors.Is(getErr, errOperationNotSupported) { diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index c9bf82031a..02e075459b 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -5,21 +5,23 @@ import ( "testing" "time" + "github.com/ipfs/boxo/exchange/offline" bsrv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" - offline "github.com/ipfs/go-ipfs-exchange-offline" mdutils "github.com/ipfs/go-merkledag/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestTeeGetter(t *testing.T) { @@ -39,8 +41,8 @@ func TestTeeGetter(t *testing.T) { tg := NewTeeGetter(ig, edsStore) t.Run("TeesToEDSStore", func(t *testing.T) { - eds, dah := randomEDS(t) - _, err := share.ImportShares(ctx, eds.Flattened(), bServ) + randEds, dah := randomEDS(t) + _, err := 
ipld.ImportShares(ctx, randEds.Flattened(), bServ) require.NoError(t, err) // eds store doesn't have the EDS yet @@ -50,7 +52,7 @@ func TestTeeGetter(t *testing.T) { retrievedEDS, err := tg.GetEDS(ctx, &dah) require.NoError(t, err) - require.True(t, share.EqualEDS(eds, retrievedEDS)) + require.True(t, randEds.Equals(retrievedEDS)) // eds store now has the EDS and it can be retrieved ok, err = edsStore.Has(ctx, dah.Hash()) @@ -58,22 +60,22 @@ func TestTeeGetter(t *testing.T) { assert.NoError(t, err) finalEDS, err := edsStore.Get(ctx, dah.Hash()) assert.NoError(t, err) - require.True(t, share.EqualEDS(eds, finalEDS)) + require.True(t, randEds.Equals(finalEDS)) }) t.Run("ShardAlreadyExistsDoesntError", func(t *testing.T) { - eds, dah := randomEDS(t) - _, err := share.ImportShares(ctx, eds.Flattened(), bServ) + randEds, dah := randomEDS(t) + _, err := ipld.ImportShares(ctx, randEds.Flattened(), bServ) require.NoError(t, err) retrievedEDS, err := tg.GetEDS(ctx, &dah) require.NoError(t, err) - require.True(t, share.EqualEDS(eds, retrievedEDS)) + require.True(t, randEds.Equals(retrievedEDS)) // no error should be returned, even though the EDS identified by the DAH already exists retrievedEDS, err = tg.GetEDS(ctx, &dah) require.NoError(t, err) - require.True(t, share.EqualEDS(eds, retrievedEDS)) + require.True(t, randEds.Equals(retrievedEDS)) }) } @@ -92,19 +94,23 @@ func TestStoreGetter(t *testing.T) { sg := NewStoreGetter(edsStore) t.Run("GetShare", func(t *testing.T) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) - squareSize := int(eds.Width()) + squareSize := int(randEds.Width()) for i := 0; i < squareSize; i++ { for j := 0; j < squareSize; j++ { share, err := sg.GetShare(ctx, &dah, i, j) require.NoError(t, err) - assert.Equal(t, eds.GetCell(uint(i), uint(j)), share) + assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) } } + // doesn't panic on indexes too high + _, err := sg.GetShare(ctx, &dah, squareSize, squareSize) + require.ErrorIs(t, err, share.ErrOutOfBounds) + // root not found _, dah = randomEDS(t) _, err = sg.GetShare(ctx, &dah, 0, 0) @@ -112,13 +118,13 @@ func TestStoreGetter(t *testing.T) { }) t.Run("GetEDS", func(t *testing.T) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) retrievedEDS, err := sg.GetEDS(ctx, &dah) require.NoError(t, err) - assert.True(t, share.EqualEDS(eds, retrievedEDS)) + assert.True(t, randEds.Equals(retrievedEDS)) // root not found root := share.Root{} @@ -127,23 +133,24 @@ func TestStoreGetter(t *testing.T) { }) t.Run("GetSharesByNamespace", func(t *testing.T) { - eds, nID, dah := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, namespace, dah := randomEDSWithDoubledNamespace(t, 4) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) - shares, err := sg.GetSharesByNamespace(ctx, &dah, nID) + shares, err := sg.GetSharesByNamespace(ctx, &dah, namespace) require.NoError(t, err) - require.NoError(t, shares.Verify(&dah, nID)) + require.NoError(t, shares.Verify(&dah, namespace)) assert.Len(t, shares.Flatten(), 2) - // nid not found - nID = make([]byte, namespace.NamespaceSize) - _, err = sg.GetSharesByNamespace(ctx, &dah, nID) - require.ErrorIs(t, err, share.ErrNamespaceNotFound) + // namespace not found + randNamespace := sharetest.RandV0Namespace() 
+ emptyShares, err := sg.GetSharesByNamespace(ctx, &dah, randNamespace) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) // root not found root := share.Root{} - _, err = sg.GetSharesByNamespace(ctx, &root, nID) + _, err = sg.GetSharesByNamespace(ctx, &root, namespace) require.ErrorIs(t, err, share.ErrNotFound) }) } @@ -160,26 +167,31 @@ func TestIPLDGetter(t *testing.T) { err = edsStore.Start(ctx) require.NoError(t, err) - bserv := bsrv.New(edsStore.Blockstore(), offline.Exchange(edsStore.Blockstore())) + bStore := edsStore.Blockstore() + bserv := bsrv.New(bStore, offline.Exchange(bStore)) sg := NewIPLDGetter(bserv) t.Run("GetShare", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) - squareSize := int(eds.Width()) + squareSize := int(randEds.Width()) for i := 0; i < squareSize; i++ { for j := 0; j < squareSize; j++ { share, err := sg.GetShare(ctx, &dah, i, j) require.NoError(t, err) - assert.Equal(t, eds.GetCell(uint(i), uint(j)), share) + assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) } } + // doesn't panic on indexes too high + _, err := sg.GetShare(ctx, &dah, squareSize+1, squareSize+1) + require.ErrorIs(t, err, share.ErrOutOfBounds) + // root not found _, dah = randomEDS(t) _, err = sg.GetShare(ctx, &dah, 0, 0) @@ -190,46 +202,53 @@ func TestIPLDGetter(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) retrievedEDS, err := sg.GetEDS(ctx, &dah) require.NoError(t, err) - assert.True(t, share.EqualEDS(eds, retrievedEDS)) + assert.True(t, randEds.Equals(retrievedEDS)) + + // Ensure blocks still exist after cleanup + colRoots, _ := retrievedEDS.ColRoots() + has, err := bStore.Has(ctx, ipld.MustCidFromNamespacedSha256(colRoots[0])) + assert.NoError(t, err) + assert.True(t, has) }) t.Run("GetSharesByNamespace", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - eds, nID, dah := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, dah.Hash(), eds) + randEds, namespace, dah := randomEDSWithDoubledNamespace(t, 4) + err = edsStore.Put(ctx, dah.Hash(), randEds) require.NoError(t, err) // first check that shares are returned correctly if they exist - shares, err := sg.GetSharesByNamespace(ctx, &dah, nID) + shares, err := sg.GetSharesByNamespace(ctx, &dah, namespace) require.NoError(t, err) - require.NoError(t, shares.Verify(&dah, nID)) + require.NoError(t, shares.Verify(&dah, namespace)) assert.Len(t, shares.Flatten(), 2) - // nid not found - nID = make([]byte, namespace.NamespaceSize) - emptyShares, err := sg.GetSharesByNamespace(ctx, &dah, nID) - require.ErrorIs(t, err, share.ErrNamespaceNotFound) - require.Nil(t, emptyShares) + // namespace not found + randNamespace := sharetest.RandV0Namespace() + emptyShares, err := sg.GetSharesByNamespace(ctx, &dah, randNamespace) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) // nid doesnt exist in root root := share.Root{} - _, err = sg.GetSharesByNamespace(ctx, &root, nID) - require.ErrorIs(t, err, share.ErrNamespaceNotFound) + emptyShares, err = sg.GetSharesByNamespace(ctx, &root, namespace) + require.NoError(t, err) + require.Empty(t, 
emptyShares.Flatten()) }) } func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, share.Root) { - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) - + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) return eds, dah } @@ -237,7 +256,7 @@ func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, share.Root) { // middle that share a namespace. func randomEDSWithDoubledNamespace(t *testing.T, size int) (*rsmt2d.ExtendedDataSquare, []byte, share.Root) { n := size * size - randShares := share.RandShares(t, n) + randShares := sharetest.RandShares(t, n) idx1 := (n - 1) / 2 idx2 := n / 2 @@ -249,7 +268,7 @@ func randomEDSWithDoubledNamespace(t *testing.T, size int) (*rsmt2d.ExtendedData // D _ _ _ // _ _ _ _ // where the D shares have a common namespace. - copy(randShares[idx2][:share.NamespaceSize], randShares[idx1][:share.NamespaceSize]) + copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) eds, err := rsmt2d.ComputeExtendedDataSquare( randShares, @@ -257,7 +276,8 @@ func randomEDSWithDoubledNamespace(t *testing.T, size int) (*rsmt2d.ExtendedData wrapper.NewConstructor(uint64(size)), ) require.NoError(t, err, "failure to recompute the extended data square") - dah := da.NewDataAvailabilityHeader(eds) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) - return eds, randShares[idx1][:share.NamespaceSize], dah + return eds, share.GetNamespace(randShares[idx1]), dah } diff --git a/share/getters/ipld.go b/share/getters/ipld.go index 04a1f4f728..a892e0fc82 100644 --- a/share/getters/ipld.go +++ b/share/getters/ipld.go @@ -2,7 +2,6 @@ package getters import ( "context" - "encoding/hex" "errors" "fmt" "sync" @@ -12,7 +11,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" @@ -43,7 +41,6 @@ func NewIPLDGetter(bServ blockservice.BlockService) *IPLDGetter { func (ig *IPLDGetter) GetShare(ctx context.Context, dah *share.Root, row, col int) (share.Share, error) { var err error ctx, span := tracer.Start(ctx, "ipld/get-share", trace.WithAttributes( - attribute.String("root", dah.String()), attribute.Int("row", row), attribute.Int("col", col), )) @@ -51,11 +48,17 @@ func (ig *IPLDGetter) GetShare(ctx context.Context, dah *share.Root, row, col in utils.SetStatusAndEnd(span, err) }() + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } root, leaf := ipld.Translate(dah, row, col) // wrap the blockservice in a session if it has been signaled in the context. 
blockGetter := getGetter(ctx, ig.bServ) - s, err := share.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) + s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) if errors.Is(err, ipld.ErrNodeNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound @@ -68,9 +71,7 @@ func (ig *IPLDGetter) GetShare(ctx context.Context, dah *share.Root, row, col in } func (ig *IPLDGetter) GetEDS(ctx context.Context, root *share.Root) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "ipld/get-eds", trace.WithAttributes( - attribute.String("root", root.String()), - )) + ctx, span := tracer.Start(ctx, "ipld/get-eds") defer func() { utils.SetStatusAndEnd(span, err) }() @@ -90,24 +91,22 @@ func (ig *IPLDGetter) GetEDS(ctx context.Context, root *share.Root) (eds *rsmt2d func (ig *IPLDGetter) GetSharesByNamespace( ctx context.Context, root *share.Root, - nID namespace.ID, + namespace share.Namespace, ) (shares share.NamespacedShares, err error) { ctx, span := tracer.Start(ctx, "ipld/get-shares-by-namespace", trace.WithAttributes( - attribute.String("root", root.String()), - attribute.String("nid", hex.EncodeToString(nID)), + attribute.String("namespace", namespace.String()), )) defer func() { utils.SetStatusAndEnd(span, err) }() - err = verifyNIDSize(nID) - if err != nil { - return nil, fmt.Errorf("getter/ipld: invalid namespace ID: %w", err) + if err = namespace.ValidateForData(); err != nil { + return nil, err } // wrap the blockservice in a session if it has been signaled in the context. blockGetter := getGetter(ctx, ig.bServ) - shares, err = collectSharesByNamespace(ctx, blockGetter, root, nID) + shares, err = collectSharesByNamespace(ctx, blockGetter, root, namespace) if errors.Is(err, ipld.ErrNodeNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound diff --git a/share/getters/shrex.go b/share/getters/shrex.go index 58a85abc0c..5a2f696854 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -2,20 +2,18 @@ package getters import ( "context" - "encoding/hex" "errors" "fmt" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/syncint64" - "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/p2p" "github.com/celestiaorg/celestia-node/share/p2p/peers" @@ -32,11 +30,11 @@ const ( defaultMinAttemptsCount = 3 ) -var meter = global.MeterProvider().Meter("shrex/getter") +var meter = otel.Meter("shrex/getter") type metrics struct { - edsAttempts syncint64.Histogram - ndAttempts syncint64.Histogram + edsAttempts metric.Int64Histogram + ndAttempts metric.Int64Histogram } func (m *metrics) recordEDSAttempt(ctx context.Context, attemptCount int, success bool) { @@ -46,7 +44,9 @@ func (m *metrics) recordEDSAttempt(ctx context.Context, attemptCount int, succes if ctx.Err() != nil { ctx = context.Background() } - m.edsAttempts.Record(ctx, int64(attemptCount), attribute.Bool("success", success)) + m.edsAttempts.Record(ctx, int64(attemptCount), + metric.WithAttributes( + attribute.Bool("success", success))) } func (m *metrics) recordNDAttempt(ctx context.Context, 
attemptCount int, success bool) { @@ -56,23 +56,23 @@ func (m *metrics) recordNDAttempt(ctx context.Context, attemptCount int, success if ctx.Err() != nil { ctx = context.Background() } - m.ndAttempts.Record(ctx, int64(attemptCount), attribute.Bool("success", success)) + m.ndAttempts.Record(ctx, int64(attemptCount), + metric.WithAttributes( + attribute.Bool("success", success))) } func (sg *ShrexGetter) WithMetrics() error { - edsAttemptHistogram, err := meter.SyncInt64().Histogram( + edsAttemptHistogram, err := meter.Int64Histogram( "getters_shrex_eds_attempts_per_request", - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription("Number of attempts per shrex/eds request"), + metric.WithDescription("Number of attempts per shrex/eds request"), ) if err != nil { return err } - ndAttemptHistogram, err := meter.SyncInt64().Histogram( + ndAttemptHistogram, err := meter.Int64Histogram( "getters_shrex_nd_attempts_per_request", - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription("Number of attempts per shrex/nd request"), + metric.WithDescription("Number of attempts per shrex/nd request"), ) if err != nil { return err @@ -128,6 +128,11 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.Ex attempt int err error ) + ctx, span := tracer.Start(ctx, "shrex/get-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + for { if ctx.Err() != nil { sg.metrics.recordEDSAttempt(ctx, attempt, false) @@ -181,17 +186,26 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.Ex func (sg *ShrexGetter) GetSharesByNamespace( ctx context.Context, root *share.Root, - id namespace.ID, + namespace share.Namespace, ) (share.NamespacedShares, error) { + if err := namespace.ValidateForData(); err != nil { + return nil, err + } var ( attempt int err error ) + ctx, span := tracer.Start(ctx, "shrex/get-shares-by-namespace", trace.WithAttributes( + attribute.String("namespace", namespace.String()), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() // verify that the namespace could exist inside the roots before starting network requests - roots := filterRootsByNamespace(root, id) + roots := filterRootsByNamespace(root, namespace) if len(roots) == 0 { - return nil, share.ErrNamespaceNotFound + return nil, nil } for { @@ -205,7 +219,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( if getErr != nil { log.Debugw("nd: couldn't find peer", "hash", root.String(), - "nid", hex.EncodeToString(id), + "namespace", namespace.String(), "err", getErr, "finished (s)", time.Since(start)) sg.metrics.recordNDAttempt(ctx, attempt, false) @@ -214,22 +228,19 @@ func (sg *ShrexGetter) GetSharesByNamespace( reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - nd, getErr := sg.ndClient.RequestND(reqCtx, root, id, peer) + nd, getErr := sg.ndClient.RequestND(reqCtx, root, namespace, peer) cancel() switch { case getErr == nil: - if getErr = nd.Verify(root, id); getErr != nil { + // both inclusion and non-inclusion cases needs verification + if verErr := nd.Verify(root, namespace); verErr != nil { + getErr = verErr setStatus(peers.ResultBlacklistPeer) break } setStatus(peers.ResultNoop) sg.metrics.recordNDAttempt(ctx, attempt, true) - return nd, getErr - case errors.Is(getErr, share.ErrNamespaceNotFound): - // TODO: will be merged with first case once non-inclusion proofs are ready - setStatus(peers.ResultNoop) - sg.metrics.recordNDAttempt(ctx, attempt, true) - return nd, 
getErr + return nd, nil case errors.Is(getErr, context.DeadlineExceeded), errors.Is(getErr, context.Canceled): setStatus(peers.ResultCooldownPeer) @@ -247,7 +258,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( } log.Debugw("nd: request failed", "hash", root.String(), - "nid", hex.EncodeToString(id), + "namespace", namespace.String(), "peer", peer.String(), "attempt", attempt, "err", getErr, diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index db60e0138a..0ca807d0d4 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -1,7 +1,12 @@ package getters import ( + "bytes" "context" + "encoding/binary" + "errors" + "math/rand" + "sort" "testing" "time" @@ -15,20 +20,22 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/pkg/wrapper" libhead "github.com/celestiaorg/go-header" - nmtnamespace "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestShrexGetter(t *testing.T) { @@ -56,21 +63,22 @@ func TestShrexGetter(t *testing.T) { getter := NewShrexGetter(edsClient, ndClient, peerManager) require.NoError(t, getter.Start(ctx)) - t.Run("ND_Available", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Run("ND_Available, total data size > 1mb", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second*10) t.Cleanup(cancel) // generate test data - eds, dah, nID := generateTestEDS(t) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + namespace := sharetest.RandV0Namespace() + randEDS, dah := singleNamespaceEds(t, namespace, 64) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: 1, }) - got, err := getter.GetSharesByNamespace(ctx, &dah, nID) + got, err := getter.GetSharesByNamespace(ctx, &dah, namespace) require.NoError(t, err) - require.NoError(t, got.Verify(&dah, nID)) + require.NoError(t, got.Verify(&dah, namespace)) }) t.Run("ND_err_not_found", func(t *testing.T) { @@ -78,50 +86,79 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - _, dah, nID := generateTestEDS(t) + _, dah, namespace := generateTestEDS(t) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: 1, }) - _, err := getter.GetSharesByNamespace(ctx, &dah, nID) + _, err := getter.GetSharesByNamespace(ctx, &dah, namespace) require.ErrorIs(t, err, share.ErrNotFound) }) - t.Run("ND_namespace_not_found", func(t *testing.T) { + t.Run("ND_namespace_not_included", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) // generate test data - eds, dah, nID := generateTestEDS(t) + eds, dah, maxNamespace := generateTestEDS(t) require.NoError(t, edsStore.Put(ctx, 
dah.Hash(), eds)) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: 1, }) - // corrupt NID - nID[4]++ + namespace, err := addToNamespace(maxNamespace, -1) + require.NoError(t, err) + // the namespace should fall within the namespace range of exactly one row root + require.Len(t, filterRootsByNamespace(&dah, namespace), 1) - _, err := getter.GetSharesByNamespace(ctx, &dah, nID) - require.ErrorIs(t, err, share.ErrNamespaceNotFound) + emptyShares, err := getter.GetSharesByNamespace(ctx, &dah, namespace) + require.NoError(t, err) + // no shares should be returned + require.Empty(t, emptyShares.Flatten()) + require.Nil(t, emptyShares.Verify(&dah, namespace)) }) - t.Run("EDS_Available", func(t *testing.T) { + t.Run("ND_namespace_not_in_dah", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) // generate test data - eds, dah, _ := generateTestEDS(t) + eds, dah, maxNamespace := generateTestEDS(t) require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: 1, }) + namespace, err := addToNamespace(maxNamespace, 1) + require.NoError(t, err) + // the namespace should fall outside the namespace range of every row root + require.Len(t, filterRootsByNamespace(&dah, namespace), 0) + + emptyShares, err := getter.GetSharesByNamespace(ctx, &dah, namespace) + require.NoError(t, err) + // no shares should be returned + require.Empty(t, emptyShares.Flatten()) + require.Nil(t, emptyShares.Verify(&dah, namespace)) + }) + + t.Run("EDS_Available", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) + // generate test data + randEDS, dah, _ := generateTestEDS(t) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + got, err := getter.GetEDS(ctx, &dah) require.NoError(t, err) - require.Equal(t, eds.Flattened(), got.Flattened()) + require.Equal(t, randEDS.Flattened(), got.Flattened()) }) t.Run("EDS_ctx_deadline", func(t *testing.T) { @@ -163,11 +200,12 @@ func newStore(t *testing.T) (*eds.Store, error) { return eds.NewStore(tmpDir, ds) } -func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, da.DataAvailabilityHeader, nmtnamespace.ID) { - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) - randNID := dah.RowRoots[(len(dah.RowRoots)-1)/2][:namespace.NamespaceSize] - return eds, dah, randNID +func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, da.DataAvailabilityHeader, share.Namespace) { + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + max := nmt.MaxNamespace(dah.RowRoots[(len(dah.RowRoots))/2-1], share.NamespaceSize) + return eds, dah, max } func testManager( @@ -237,3 +275,125 @@ func newEDSClientServer( require.NoError(t, err) return client, server } + +// addToNamespace adds an arbitrary int value to the namespace, treating the namespace as a +// big-endian integer +func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) { + if val == 0 { + return namespace, nil + } + // Encode the absolute value as big-endian bytes into the result slice + result := make([]byte, len(namespace)) + if val > 0 { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) + } else { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(-val)) + } + + // Perform addition byte by byte + var carry int + for i := 
len(namespace) - 1; i >= 0; i-- { + sum := 0 + if val > 0 { + sum = int(namespace[i]) + int(result[i]) + carry + } else { + sum = int(namespace[i]) - int(result[i]) + carry + } + + switch { + case sum > 255: + carry = 1 + sum -= 256 + case sum < 0: + carry = -1 + sum += 256 + default: + carry = 0 + } + + result[i] = uint8(sum) + } + + // Handle any remaining carry + if carry != 0 { + return nil, errors.New("namespace overflow") + } + + return result, nil +} + +func TestAddToNamespace(t *testing.T) { + testCases := []struct { + name string + value int + input share.Namespace + expected share.Namespace + expectedError error + }{ + { + name: "Positive value addition", + value: 42, + input: share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + expected: share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2b}, + expectedError: nil, + }, + { + name: "Negative value addition", + value: -42, + input: share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + expected: share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x01, 0x1, 0x1, 0x1, 0x0, 0xd7}, + expectedError: nil, + }, + { + name: "Overflow error", + value: 1, + input: share.Namespace{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + { + name: "Overflow error negative", + value: -1, + input: share.Namespace{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := addToNamespace(tc.input, tc.value) + if tc.expectedError == nil { + require.NoError(t, err) + require.Equal(t, tc.expected, result) + return + } + require.Error(t, err) + if err.Error() != tc.expectedError.Error() { + t.Errorf("Unexpected error message. 
Expected: %v, Got: %v", tc.expectedError, err) + } + }) + } +} + +func singleNamespaceEds( + t require.TestingT, + namespace share.Namespace, + size int, +) (*rsmt2d.ExtendedDataSquare, da.DataAvailabilityHeader) { + shares := make([]share.Share, size*size) + rnd := rand.New(rand.NewSource(time.Now().Unix())) + for i := range shares { + shr := make([]byte, share.Size) + copy(share.GetNamespace(shr), namespace) + _, err := rnd.Read(share.GetData(shr)) + require.NoError(t, err) + shares[i] = shr + } + sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 }) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + return eds, dah +} diff --git a/share/getters/store.go b/share/getters/store.go index 91200b78f3..989649f795 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -2,14 +2,12 @@ package getters import ( "context" - "encoding/hex" "errors" "fmt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" @@ -38,7 +36,6 @@ func NewStoreGetter(store *eds.Store) *StoreGetter { func (sg *StoreGetter) GetShare(ctx context.Context, dah *share.Root, row, col int) (share.Share, error) { var err error ctx, span := tracer.Start(ctx, "store/get-share", trace.WithAttributes( - attribute.String("root", dah.String()), attribute.Int("row", row), attribute.Int("col", col), )) @@ -46,6 +43,12 @@ func (sg *StoreGetter) GetShare(ctx context.Context, dah *share.Root, row, col i utils.SetStatusAndEnd(span, err) }() + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } root, leaf := ipld.Translate(dah, row, col) bs, err := sg.store.CARBlockstore(ctx, dah.Hash()) if errors.Is(err, eds.ErrNotFound) { @@ -58,7 +61,7 @@ func (sg *StoreGetter) GetShare(ctx context.Context, dah *share.Root, row, col i // wrap the read-only CAR blockstore in a getter blockGetter := eds.NewBlockGetter(bs) - s, err := share.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) + s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) if errors.Is(err, ipld.ErrNodeNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound @@ -72,9 +75,7 @@ func (sg *StoreGetter) GetShare(ctx context.Context, dah *share.Root, row, col i // GetEDS gets the EDS identified by the given root from the EDS store. 
func (sg *StoreGetter) GetEDS(ctx context.Context, root *share.Root) (data *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "store/get-eds", trace.WithAttributes( - attribute.String("root", root.String()), - )) + ctx, span := tracer.Start(ctx, "store/get-eds") defer func() { utils.SetStatusAndEnd(span, err) }() @@ -95,19 +96,17 @@ func (sg *StoreGetter) GetEDS(ctx context.Context, root *share.Root) (data *rsmt func (sg *StoreGetter) GetSharesByNamespace( ctx context.Context, root *share.Root, - nID namespace.ID, + namespace share.Namespace, ) (shares share.NamespacedShares, err error) { ctx, span := tracer.Start(ctx, "store/get-shares-by-namespace", trace.WithAttributes( - attribute.String("root", root.String()), - attribute.String("nid", hex.EncodeToString(nID)), + attribute.String("namespace", namespace.String()), )) defer func() { utils.SetStatusAndEnd(span, err) }() - err = verifyNIDSize(nID) - if err != nil { - return nil, fmt.Errorf("getter/store: invalid namespace ID: %w", err) + if err = namespace.ValidateForData(); err != nil { + return nil, err } bs, err := sg.store.CARBlockstore(ctx, root.Hash()) @@ -121,11 +120,7 @@ func (sg *StoreGetter) GetSharesByNamespace( // wrap the read-only CAR blockstore in a getter blockGetter := eds.NewBlockGetter(bs) - shares, err = collectSharesByNamespace(ctx, blockGetter, root, nID) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } + shares, err = collectSharesByNamespace(ctx, blockGetter, root, namespace) if err != nil { return nil, fmt.Errorf("getter/store: failed to retrieve shares by namespace: %w", err) } diff --git a/share/getters/tee.go b/share/getters/tee.go index 50e2e1b55d..9c89b2dec5 100644 --- a/share/getters/tee.go +++ b/share/getters/tee.go @@ -2,7 +2,6 @@ package getters import ( "context" - "encoding/hex" "errors" "fmt" @@ -10,12 +9,12 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" ) var _ share.Getter = (*TeeGetter)(nil) @@ -37,7 +36,6 @@ func NewTeeGetter(getter share.Getter, store *eds.Store) *TeeGetter { func (tg *TeeGetter) GetShare(ctx context.Context, root *share.Root, row, col int) (share share.Share, err error) { ctx, span := tracer.Start(ctx, "tee/get-share", trace.WithAttributes( - attribute.String("root", root.String()), attribute.Int("row", row), attribute.Int("col", col), )) @@ -49,13 +47,15 @@ func (tg *TeeGetter) GetShare(ctx context.Context, root *share.Root, row, col in } func (tg *TeeGetter) GetEDS(ctx context.Context, root *share.Root) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "tee/get-eds", trace.WithAttributes( - attribute.String("root", root.String()), - )) + ctx, span := tracer.Start(ctx, "tee/get-eds") defer func() { utils.SetStatusAndEnd(span, err) }() + adder := ipld.NewProofsAdder(len(root.RowRoots)) + ctx = ipld.CtxWithProofsAdder(ctx, adder) + defer adder.Purge() + eds, err = tg.getter.GetEDS(ctx, root) if err != nil { return nil, err @@ -72,15 +72,14 @@ func (tg *TeeGetter) GetEDS(ctx context.Context, root *share.Root) (eds *rsmt2d. 
func (tg *TeeGetter) GetSharesByNamespace( ctx context.Context, root *share.Root, - id namespace.ID, + namespace share.Namespace, ) (shares share.NamespacedShares, err error) { ctx, span := tracer.Start(ctx, "tee/get-shares-by-namespace", trace.WithAttributes( - attribute.String("root", root.String()), - attribute.String("nid", hex.EncodeToString(id)), + attribute.String("namespace", namespace.String()), )) defer func() { utils.SetStatusAndEnd(span, err) }() - return tg.getter.GetSharesByNamespace(ctx, root, id) + return tg.getter.GetSharesByNamespace(ctx, root, namespace) } diff --git a/share/getters/testing.go b/share/getters/testing.go index a90b937a51..71c6231f3c 100644 --- a/share/getters/testing.go +++ b/share/getters/testing.go @@ -5,17 +5,20 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" ) // TestGetter provides a testing SingleEDSGetter and the root of the EDS it holds. func TestGetter(t *testing.T) (share.Getter, *share.Root) { - eds := share.RandEDS(t, 8) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(t, 8) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) return &SingleEDSGetter{ EDS: eds, }, &dah @@ -46,13 +49,16 @@ func (seg *SingleEDSGetter) GetEDS(_ context.Context, root *share.Root) (*rsmt2d } // GetSharesByNamespace returns NamespacedShares from a kept EDS if the correct root is given. -func (seg *SingleEDSGetter) GetSharesByNamespace(context.Context, *share.Root, namespace.ID, +func (seg *SingleEDSGetter) GetSharesByNamespace(context.Context, *share.Root, share.Namespace, ) (share.NamespacedShares, error) { panic("SingleEDSGetter: GetSharesByNamespace is not implemented") } func (seg *SingleEDSGetter) checkRoot(root *share.Root) error { - dah := da.NewDataAvailabilityHeader(seg.EDS) + dah, err := da.NewDataAvailabilityHeader(seg.EDS) + if err != nil { + return err + } if !root.Equals(&dah) { return fmt.Errorf("unknown EDS: have %s, asked %s", dah.String(), root.String()) } diff --git a/share/getters/utils.go b/share/getters/utils.go index c99a7689b8..5305c5c737 100644 --- a/share/getters/utils.go +++ b/share/getters/utils.go @@ -2,7 +2,6 @@ package getters import ( "context" - "encoding/hex" "errors" "fmt" "time" @@ -15,9 +14,6 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" @@ -31,36 +27,34 @@ var ( ) // filterRootsByNamespace returns the row roots from the given share.Root that contain the passed -// namespace ID. -func filterRootsByNamespace(root *share.Root, nID namespace.ID) []cid.Cid { +// namespace. +func filterRootsByNamespace(root *share.Root, namespace share.Namespace) []cid.Cid { rowRootCIDs := make([]cid.Cid, 0, len(root.RowRoots)) for _, row := range root.RowRoots { - if !nID.Less(nmt.MinNamespace(row, nID.Size())) && nID.LessOrEqual(nmt.MaxNamespace(row, nID.Size())) { + if !namespace.IsOutsideRange(row, row) { rowRootCIDs = append(rowRootCIDs, ipld.MustCidFromNamespacedSha256(row)) } } return rowRootCIDs } -// collectSharesByNamespace collects NamespaceShares within the given namespace ID from the given -// share.Root. 
+// collectSharesByNamespace collects NamespaceShares within the given namespace from share.Root. func collectSharesByNamespace( ctx context.Context, bg blockservice.BlockGetter, root *share.Root, - nID namespace.ID, + namespace share.Namespace, ) (shares share.NamespacedShares, err error) { ctx, span := tracer.Start(ctx, "collect-shares-by-namespace", trace.WithAttributes( - attribute.String("root", root.String()), - attribute.String("nid", hex.EncodeToString(nID)), + attribute.String("namespace", namespace.String()), )) defer func() { utils.SetStatusAndEnd(span, err) }() - rootCIDs := filterRootsByNamespace(root, nID) + rootCIDs := filterRootsByNamespace(root, namespace) if len(rootCIDs) == 0 { - return nil, share.ErrNamespaceNotFound + return nil, nil } errGroup, ctx := errgroup.WithContext(ctx) @@ -69,13 +63,13 @@ func collectSharesByNamespace( // shadow loop variables, to ensure correct values are captured i, rootCID := i, rootCID errGroup.Go(func() error { - row, proof, err := share.GetSharesByNamespace(ctx, bg, rootCID, nID, len(root.RowRoots)) + row, proof, err := ipld.GetSharesByNamespace(ctx, bg, rootCID, namespace, len(root.RowRoots)) shares[i] = share.NamespacedRow{ Shares: row, Proof: proof, } if err != nil { - return fmt.Errorf("retrieving nID %x for row %x: %w", nID, rootCID, err) + return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), rootCID, err) } return nil }) @@ -85,22 +79,9 @@ func collectSharesByNamespace( return nil, err } - // return ErrNamespaceNotFound if no shares are found for the namespace.ID - if len(rootCIDs) == 1 && len(shares[0].Shares) == 0 { - return nil, share.ErrNamespaceNotFound - } - return shares, nil } -func verifyNIDSize(nID namespace.ID) error { - if len(nID) != share.NamespaceSize { - return fmt.Errorf("expected namespace ID of size %d, got %d", - share.NamespaceSize, len(nID)) - } - return nil -} - // ctxWithSplitTimeout will split timeout stored in context by splitFactor and return the result if // it is greater than minTimeout. 
minTimeout == 0 will be ignored, splitFactor <= 0 will be ignored func ctxWithSplitTimeout( diff --git a/share/getters/utils_test.go b/share/getters/utils_test.go index 73e9400010..65de9d47f2 100644 --- a/share/getters/utils_test.go +++ b/share/getters/utils_test.go @@ -1,11 +1,14 @@ package getters import ( + "context" "errors" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_ErrorContains(t *testing.T) { @@ -101,3 +104,122 @@ func Test_ErrorContains(t *testing.T) { }) } } + +func Test_ctxWithSplitTimeout(t *testing.T) { + type args struct { + ctxTimeout time.Duration + splitFactor []int + minTimeout time.Duration + } + tests := []struct { + name string + args args + want time.Duration + }{ + { + name: "ctxTimeout > minTimeout, splitFactor <= 0", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{-1, 0}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "ctxTimeout > minTimeout, splitFactor = 1", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{1}, + minTimeout: time.Minute, + }, + want: 3 * time.Minute, + }, + { + name: "ctxTimeout > minTimeout, splitFactor = 2", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{2}, + minTimeout: time.Minute, + }, + want: 3 * time.Minute / 2, + }, + { + name: "ctxTimeout > minTimeout, resulted timeout limited by minTimeout", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{3, 4, 5}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "ctxTimeout < minTimeout", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{-1, 0, 1, 2, 3}, + minTimeout: 2 * time.Minute, + }, + want: time.Minute, + }, + { + name: "minTimeout = 0, splitFactor <= 1", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{-1, 0, 1}, + minTimeout: 0, + }, + want: time.Minute, + }, + { + name: "minTimeout = 0, splitFactor > 1", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{2}, + minTimeout: 0, + }, + want: time.Minute / 2, + }, + { + name: "no context timeout", + args: args{ + ctxTimeout: 0, + splitFactor: []int{-1, 0, 1, 2}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "no context timeout, minTimeout = 0", + args: args{ + ctxTimeout: 0, + splitFactor: []int{-1, 0, 1, 2}, + minTimeout: 0, + }, + want: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, sf := range tt.args.splitFactor { + ctx, cancel := context.WithCancel(context.Background()) + // add timeout if original context should have it + if tt.args.ctxTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, tt.args.ctxTimeout) + } + t.Cleanup(cancel) + got, _ := ctxWithSplitTimeout(ctx, sf, tt.args.minTimeout) + dl, ok := got.Deadline() + // in case no deadline is found in ctx or not expected to be found, check both cases apply at the + // same time + if !ok || tt.want == 0 { + require.False(t, ok) + require.Equal(t, tt.want, time.Duration(0)) + continue + } + d := time.Until(dl) + require.True(t, d <= tt.want+time.Second) + require.True(t, d >= tt.want-time.Second) + } + }) + } +} diff --git a/share/add.go b/share/ipld/add.go similarity index 60% rename from share/add.go rename to share/ipld/add.go index 02016cadf6..7e5909669d 100644 --- a/share/add.go +++ b/share/ipld/add.go @@ -1,4 +1,4 @@ -package share +package ipld import ( "context" @@ -11,14 +11,14 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" - 
"github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share" ) // AddShares erasures and extends shares to blockservice.BlockService using the provided // ipld.NodeAdder. func AddShares( ctx context.Context, - shares []Share, + shares []share.Share, adder blockservice.BlockService, ) (*rsmt2d.ExtendedDataSquare, error) { if len(shares) == 0 { @@ -26,12 +26,12 @@ func AddShares( } squareSize := int(utils.SquareSize(len(shares))) // create nmt adder wrapping batch adder with calculated size - batchAdder := ipld.NewNmtNodeAdder(ctx, adder, ipld.MaxSizeBatchOption(squareSize*2)) + batchAdder := NewNmtNodeAdder(ctx, adder, MaxSizeBatchOption(squareSize*2)) // create the nmt wrapper to generate row and col commitments // recompute the eds eds, err := rsmt2d.ComputeExtendedDataSquare( shares, - DefaultRSMT2DCodec(), + share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(squareSize), nmt.NodeVisitor(batchAdder.Visit)), ) @@ -39,7 +39,10 @@ func AddShares( return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) } // compute roots - eds.RowRoots() + _, err = eds.RowRoots() + if err != nil { + return nil, err + } // commit the batch to ipfs return eds, batchAdder.Commit() } @@ -55,11 +58,11 @@ func ImportShares( } squareSize := int(utils.SquareSize(len(shares))) // create nmt adder wrapping batch adder with calculated size - batchAdder := ipld.NewNmtNodeAdder(ctx, adder, ipld.MaxSizeBatchOption(squareSize*2)) + batchAdder := NewNmtNodeAdder(ctx, adder, MaxSizeBatchOption(squareSize*2)) // recompute the eds eds, err := rsmt2d.ImportExtendedDataSquare( shares, - DefaultRSMT2DCodec(), + share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(squareSize/2), nmt.NodeVisitor(batchAdder.Visit)), ) @@ -67,36 +70,16 @@ func ImportShares( return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) } // compute roots - eds.RowRoots() + _, err = eds.RowRoots() + if err != nil { + return nil, err + } // commit the batch to DAG return eds, batchAdder.Commit() } -// ExtractODS returns the original shares of the given ExtendedDataSquare. This -// is a helper function for circumstances where AddShares must be used after the EDS has already -// been generated. -func ExtractODS(eds *rsmt2d.ExtendedDataSquare) []Share { - origWidth := eds.Width() / 2 - origShares := make([][]byte, origWidth*origWidth) - for i := uint(0); i < origWidth; i++ { - row := eds.Row(i) - for j := uint(0); j < origWidth; j++ { - origShares[(i*origWidth)+j] = row[j] - } - } - return origShares -} - -// ExtractEDS takes an EDS and extracts all shares from it in a flattened slice(row by row). 
-func ExtractEDS(eds *rsmt2d.ExtendedDataSquare) []Share { - flattenedEDSSize := eds.Width() * eds.Width() - out := make([][]byte, flattenedEDSSize) - count := 0 - for i := uint(0); i < eds.Width(); i++ { - for _, share := range eds.Row(i) { - out[count] = share - count++ - } - } - return out +func ImportEDS(ctx context.Context, square *rsmt2d.ExtendedDataSquare, adder blockservice.BlockService) error { + shares := square.Flattened() + _, err := ImportShares(ctx, shares, adder) + return err } diff --git a/share/ipld/get.go b/share/ipld/get.go index 70385f73f7..35f601853d 100644 --- a/share/ipld/get.go +++ b/share/ipld/get.go @@ -12,6 +12,8 @@ import ( ipld "github.com/ipfs/go-ipld-format" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + + "github.com/celestiaorg/celestia-node/share" ) // NumWorkersLimit sets global limit for workers spawned by GetShares. @@ -26,7 +28,7 @@ import ( // // TODO(@Wondertan): This assumes we have parallelized DASer implemented. Sync the values once it is shipped. // TODO(@Wondertan): Allow configuration of values without global state. -var NumWorkersLimit = MaxSquareSize * MaxSquareSize / 2 * NumConcurrentSquares +var NumWorkersLimit = share.MaxSquareSize * share.MaxSquareSize / 2 * NumConcurrentSquares // NumConcurrentSquares limits the amount of squares that are fetched // concurrently/simultaneously. @@ -103,7 +105,7 @@ func GetLeaves(ctx context.Context, // this buffer ensures writes to 'jobs' are never blocking (bin-tree-feat) jobs := make(chan *job, (maxShares+1)/2) // +1 for the case where 'maxShares' is 1 - jobs <- &job{id: root, ctx: ctx} + jobs <- &job{cid: root, ctx: ctx} // total is an amount of routines spawned and total amount of nodes we process (bin-tree-feat) // so we can specify exact amount of loops we do, and wait for this amount // of routines to finish processing @@ -123,11 +125,11 @@ func GetLeaves(ctx context.Context, defer wg.Done() span.SetAttributes( - attribute.String("cid", j.id.String()), + attribute.String("cid", j.cid.String()), attribute.Int("pos", j.sharePos), ) - nd, err := GetNode(ctx, bGetter, j.id) + nd, err := GetNode(ctx, bGetter, j.cid) if err != nil { // we don't really care about errors here // just fetch as much as possible @@ -149,7 +151,7 @@ func GetLeaves(ctx context.Context, // send those to be processed select { case jobs <- &job{ - id: lnk.Cid, + cid: lnk.Cid, // calc position for children nodes (bin-tree-feat), // s.t. 'if' above knows where to put a share sharePos: j.sharePos*2 + i, @@ -213,7 +215,7 @@ func GetProof( // chanGroup implements an atomic wait group, closing a jobs chan // when fully done. type chanGroup struct { - jobs chan *job + jobs chan job counter int64 } @@ -233,8 +235,29 @@ func (w *chanGroup) done() { // job represents an encountered node to investigate during the `GetLeaves` // and `CollectLeavesByNamespace` routines. 
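// Editorial note on the reworked struct below: each job now carries its own context so tracing
// spans form a tree, and next() derives a child job by doubling sharePos (plus one for a right
// child), incrementing depth, and propagating isAbsent so absence proofs can be collected.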
type job struct { - id cid.Cid + // we pass the context to job so that spans are tracked in a tree + // structure + ctx context.Context + // cid of the node that will be handled + cid cid.Cid + // sharePos represents potential share position in share slice sharePos int - depth int - ctx context.Context + // depth represents the number of edges present in path from the root node of a tree to that node + depth int + // isAbsent indicates if target namespaceID is not included, only collect absence proofs + isAbsent bool +} + +func (j job) next(direction direction, cid cid.Cid, isAbsent bool) job { + var i int + if direction == right { + i++ + } + return job{ + ctx: j.ctx, + cid: cid, + sharePos: j.sharePos*2 + i, + depth: j.depth + 1, + isAbsent: isAbsent, + } } diff --git a/share/get.go b/share/ipld/get_shares.go similarity index 69% rename from share/get.go rename to share/ipld/get_shares.go index 9ec4c3c2c5..0bed240fdc 100644 --- a/share/get.go +++ b/share/ipld/get_shares.go @@ -1,4 +1,4 @@ -package share +package ipld import ( "context" @@ -8,9 +8,8 @@ import ( format "github.com/ipfs/go-ipld-format" "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share" ) // GetShare fetches and returns the data for leaf `leafIndex` of root `rootCid`. @@ -20,8 +19,8 @@ func GetShare( rootCid cid.Cid, leafIndex int, totalLeafs int, // this corresponds to the extended square width -) (Share, error) { - nd, err := ipld.GetLeaf(ctx, bGetter, rootCid, leafIndex, totalLeafs) +) (share.Share, error) { + nd, err := GetLeaf(ctx, bGetter, rootCid, leafIndex, totalLeafs) if err != nil { return nil, err } @@ -32,30 +31,30 @@ func GetShare( // GetShares walks the tree of a given root and puts shares into the given 'put' func. // Does not return any error, and returns/unblocks only on success // (got all shares) or on context cancellation. -func GetShares(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) { +func GetShares(ctx context.Context, bg blockservice.BlockGetter, root cid.Cid, shares int, put func(int, share.Share)) { ctx, span := tracer.Start(ctx, "get-shares") defer span.End() putNode := func(i int, leaf format.Node) { put(i, leafToShare(leaf)) } - ipld.GetLeaves(ctx, bGetter, root, shares, putNode) + GetLeaves(ctx, bg, root, shares, putNode) } // GetSharesByNamespace walks the tree of a given root and returns its shares within the given -// namespace.ID. If a share could not be retrieved, err is not nil, and the returned array +// Namespace. If a share could not be retrieved, err is not nil, and the returned array // contains nil shares in place of the shares it was unable to retrieve. 
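// Editorial illustration (not part of this change set): the tests in this diff call it once per
// row root and skip rows whose namespace range cannot contain the target, e.g.
//
//	rowShares, _, err := GetSharesByNamespace(ctx, bServ, MustCidFromNamespacedSha256(row), namespace, len(rowRoots))
//	if errors.Is(err, ErrNamespaceOutsideRange) {
//		continue
//	}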
func GetSharesByNamespace( ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, - nID namespace.ID, + namespace share.Namespace, maxShares int, -) ([]Share, *nmt.Proof, error) { +) ([]share.Share, *nmt.Proof, error) { ctx, span := tracer.Start(ctx, "get-shares-by-namespace") defer span.End() - data := ipld.NewNamespaceData(maxShares, nID, ipld.WithLeaves(), ipld.WithProofs()) + data := NewNamespaceData(maxShares, namespace, WithLeaves(), WithProofs()) err := data.CollectLeavesByNamespace(ctx, bGetter, root) if err != nil { return nil, nil, err @@ -63,7 +62,7 @@ func GetSharesByNamespace( leaves := data.Leaves() - shares := make([]Share, len(leaves)) + shares := make([]share.Share, len(leaves)) for i, leaf := range leaves { if leaf != nil { shares[i] = leafToShare(leaf) @@ -73,8 +72,8 @@ func GetSharesByNamespace( } // leafToShare converts an NMT leaf into a Share. -func leafToShare(nd format.Node) Share { +func leafToShare(nd format.Node) share.Share { // * Additional namespace is prepended so that parity data can be identified with a parity // namespace, which we cut off - return nd.RawData()[NamespaceSize:] + return share.GetData(nd.RawData()) } diff --git a/share/get_test.go b/share/ipld/get_shares_test.go similarity index 51% rename from share/get_test.go rename to share/ipld/get_shares_test.go index 8eafe84cd8..dd6c3a6636 100644 --- a/share/get_test.go +++ b/share/ipld/get_shares_test.go @@ -1,29 +1,33 @@ -package share +package ipld import ( + "bytes" "context" + "crypto/sha256" + "errors" mrand "math/rand" + "sort" "strconv" "testing" "time" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - offline "github.com/ipfs/go-ipfs-exchange-offline" mdutils "github.com/ipfs/go-merkledag/test" - "github.com/minio/sha256-simd" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestGetShare(t *testing.T) { @@ -34,14 +38,16 @@ func TestGetShare(t *testing.T) { bServ := mdutils.Bserv() // generate random shares for the nmt - shares := RandShares(t, size*size) + shares := sharetest.RandShares(t, size*size) eds, err := AddShares(ctx, shares, bServ) require.NoError(t, err) for i, leaf := range shares { row := i / size pos := i - (size * row) - share, err := GetShare(ctx, bServ, ipld.MustCidFromNamespacedSha256(eds.RowRoots()[row]), pos, size*2) + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + share, err := GetShare(ctx, bServ, MustCidFromNamespacedSha256(rowRoots[row]), pos, size*2) require.NoError(t, err) assert.Equal(t, leaf, share) } @@ -54,12 +60,12 @@ func TestBlockRecovery(t *testing.T) { extendedShareCount := extendedSquareWidth * extendedSquareWidth // generate test data - quarterShares := RandShares(t, shareCount) - allShares := RandShares(t, shareCount) + quarterShares := sharetest.RandShares(t, shareCount) + allShares := sharetest.RandShares(t, shareCount) testCases := []struct { name string - shares []Share + shares []share.Share 
expectErr bool errString string d int // number of shares to delete @@ -75,25 +81,31 @@ func TestBlockRecovery(t *testing.T) { t.Run(tc.name, func(t *testing.T) { squareSize := utils.SquareSize(len(tc.shares)) - eds, err := rsmt2d.ComputeExtendedDataSquare(tc.shares, DefaultRSMT2DCodec(), wrapper.NewConstructor(squareSize)) + testEds, err := rsmt2d.ComputeExtendedDataSquare( + tc.shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(squareSize), + ) require.NoError(t, err) // calculate roots using the first complete square - rowRoots := eds.RowRoots() - colRoots := eds.ColRoots() + rowRoots, err := testEds.RowRoots() + require.NoError(t, err) + colRoots, err := testEds.ColRoots() + require.NoError(t, err) - flat := ExtractEDS(eds) + flat := testEds.Flattened() // recover a partially complete square rdata := removeRandShares(flat, tc.d) - eds, err = rsmt2d.ImportExtendedDataSquare( + testEds, err = rsmt2d.ImportExtendedDataSquare( rdata, - DefaultRSMT2DCodec(), + share.DefaultRSMT2DCodec(), wrapper.NewConstructor(squareSize), ) require.NoError(t, err) - err = eds.Repair(rowRoots, colRoots) + err = testEds.Repair(rowRoots, colRoots) if tc.expectErr { require.Error(t, err) require.Contains(t, err.Error(), tc.errString) @@ -101,27 +113,27 @@ func TestBlockRecovery(t *testing.T) { } assert.NoError(t, err) - reds, err := rsmt2d.ImportExtendedDataSquare(rdata, DefaultRSMT2DCodec(), wrapper.NewConstructor(squareSize)) + reds, err := rsmt2d.ImportExtendedDataSquare(rdata, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(squareSize)) require.NoError(t, err) // check that the squares are equal - assert.Equal(t, ExtractEDS(eds), ExtractEDS(reds)) + assert.Equal(t, testEds.Flattened(), reds.Flattened()) }) } } func Test_ConvertEDStoShares(t *testing.T) { squareWidth := 16 - shares := RandShares(t, squareWidth*squareWidth) + shares := sharetest.RandShares(t, squareWidth*squareWidth) // compute extended square - eds, err := rsmt2d.ComputeExtendedDataSquare( + testEds, err := rsmt2d.ComputeExtendedDataSquare( shares, - DefaultRSMT2DCodec(), + share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(squareWidth)), ) require.NoError(t, err) - resshares := ExtractODS(eds) + resshares := testEds.FlattenedODS() require.Equal(t, shares, resshares) } @@ -141,33 +153,38 @@ func removeRandShares(data [][]byte, d int) [][]byte { } func TestGetSharesByNamespace(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() var tests = []struct { - rawData []Share + rawData []share.Share }{ - {rawData: RandShares(t, 4)}, - {rawData: RandShares(t, 16)}, + {rawData: sharetest.RandShares(t, 4)}, + {rawData: sharetest.RandShares(t, 16)}, } for i, tt := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - // choose random nID from rand shares + // choose random namespace from rand shares expected := tt.rawData[len(tt.rawData)/2] - nID := expected[:NamespaceSize] + namespace := share.GetNamespace(expected) - // change rawData to contain several shares with same nID + // change rawData to contain several shares with same namespace tt.rawData[(len(tt.rawData)/2)+1] = expected // put raw data in BlockService eds, err := AddShares(ctx, tt.rawData, bServ) require.NoError(t, err) - var shares []Share - for _, row := range eds.RowRoots() { - rcid := ipld.MustCidFromNamespacedSha256(row) - rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, nID, 
len(eds.RowRoots())) + var shares []share.Share + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + if errors.Is(err, ErrNamespaceOutsideRange) { + continue + } require.NoError(t, err) shares = append(shares, rowShares...) @@ -182,113 +199,103 @@ func TestGetSharesByNamespace(t *testing.T) { } func TestCollectLeavesByNamespace_IncompleteData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() - shares := RandShares(t, 16) + shares := sharetest.RandShares(t, 16) // set all shares to the same namespace id - nid := shares[0][:NamespaceSize] - - for i, nspace := range shares { - if i == len(shares) { - break - } - - copy(nspace[:NamespaceSize], nid) + namespace := share.GetNamespace(shares[0]) + for _, shr := range shares { + copy(share.GetNamespace(shr), namespace) } eds, err := AddShares(ctx, shares, bServ) require.NoError(t, err) - roots := eds.RowRoots() + roots, err := eds.RowRoots() + require.NoError(t, err) // remove the second share from the first row - rcid := ipld.MustCidFromNamespacedSha256(roots[0]) - node, err := ipld.GetNode(ctx, bServ, rcid) + rcid := MustCidFromNamespacedSha256(roots[0]) + node, err := GetNode(ctx, bServ, rcid) require.NoError(t, err) // Left side of the tree contains the original shares - data, err := ipld.GetNode(ctx, bServ, node.Links()[0].Cid) + data, err := GetNode(ctx, bServ, node.Links()[0].Cid) require.NoError(t, err) // Second share is the left side's right child - l, err := ipld.GetNode(ctx, bServ, data.Links()[0].Cid) + l, err := GetNode(ctx, bServ, data.Links()[0].Cid) require.NoError(t, err) - r, err := ipld.GetNode(ctx, bServ, l.Links()[1].Cid) + r, err := GetNode(ctx, bServ, l.Links()[1].Cid) require.NoError(t, err) err = bServ.DeleteBlock(ctx, r.Cid()) require.NoError(t, err) - namespaceData := ipld.NewNamespaceData(len(shares), nid, ipld.WithLeaves()) + namespaceData := NewNamespaceData(len(shares), namespace, WithLeaves()) err = namespaceData.CollectLeavesByNamespace(ctx, bServ, rcid) + require.Error(t, err) leaves := namespaceData.Leaves() assert.Nil(t, leaves[1]) assert.Equal(t, 4, len(leaves)) - require.Error(t, err) } func TestCollectLeavesByNamespace_AbsentNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() - shares := RandShares(t, 16) - - minNid := make([]byte, NamespaceSize) - midNid := make([]byte, NamespaceSize) - maxNid := make([]byte, NamespaceSize) - - numberOfShares := len(shares) - - copy(minNid, shares[0][:NamespaceSize]) - copy(maxNid, shares[numberOfShares-1][:NamespaceSize]) - copy(midNid, shares[numberOfShares/2][:NamespaceSize]) - - // create min nid missing data by replacing first namespace id with second - minNidMissingData := make([]Share, len(shares)) - copy(minNidMissingData, shares) - copy(minNidMissingData[0][:NamespaceSize], shares[1][:NamespaceSize]) - - // create max nid missing data by replacing last namespace id with second last - maxNidMissingData := make([]Share, len(shares)) - copy(maxNidMissingData, shares) - copy(maxNidMissingData[numberOfShares-1][:NamespaceSize], shares[numberOfShares-2][:NamespaceSize]) + 
shares := sharetest.RandShares(t, 1024) - // create mid nid missing data by replacing middle namespace id with the one after - midNidMissingData := make([]Share, len(shares)) - copy(midNidMissingData, shares) - copy(midNidMissingData[numberOfShares/2][:NamespaceSize], shares[(numberOfShares/2)+1][:NamespaceSize]) + // set all shares to the same namespace + namespaces, err := randomNamespaces(5) + require.NoError(t, err) + minNamespace := namespaces[0] + minIncluded := namespaces[1] + midNamespace := namespaces[2] + maxIncluded := namespaces[3] + maxNamespace := namespaces[4] + + secondNamespaceFrom := mrand.Intn(len(shares)-2) + 1 + for i, shr := range shares { + if i < secondNamespaceFrom { + copy(share.GetNamespace(shr), minIncluded) + continue + } + copy(share.GetNamespace(shr), maxIncluded) + } var tests = []struct { - name string - data []Share - missingNid []byte + name string + data []share.Share + missingNamespace share.Namespace + isAbsence bool }{ - {name: "Namespace id less than the minimum namespace in data", data: minNidMissingData, missingNid: minNid}, - {name: "Namespace id greater than the maximum namespace in data", data: maxNidMissingData, missingNid: maxNid}, - {name: "Namespace id in range but still missing", data: midNidMissingData, missingNid: midNid}, + {name: "Namespace less than the minimum namespace in data", data: shares, missingNamespace: minNamespace}, + {name: "Namespace greater than the maximum namespace in data", data: shares, missingNamespace: maxNamespace}, + {name: "Namespace in range but still missing", data: shares, missingNamespace: midNamespace, isAbsence: true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { eds, err := AddShares(ctx, shares, bServ) require.NoError(t, err) - assertNoRowContainsNID(t, bServ, eds, tt.missingNid) + assertNoRowContainsNID(ctx, t, bServ, eds, tt.missingNamespace, tt.isAbsence) }) } } func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() - shares := RandShares(t, 16) + shares := sharetest.RandShares(t, 16) // set all shares to the same namespace and data but the last one - nid := shares[0][:NamespaceSize] + namespace := share.GetNamespace(shares[0]) commonNamespaceData := shares[0] for i, nspace := range shares { @@ -302,31 +309,37 @@ func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testi eds, err := AddShares(ctx, shares, bServ) require.NoError(t, err) - for _, row := range eds.RowRoots() { - rcid := ipld.MustCidFromNamespacedSha256(row) - data := ipld.NewNamespaceData(len(shares), nid, ipld.WithLeaves()) + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + data := NewNamespaceData(len(shares), namespace, WithLeaves()) err := data.CollectLeavesByNamespace(ctx, bServ, rcid) + if errors.Is(err, ErrNamespaceOutsideRange) { + continue + } assert.Nil(t, err) leaves := data.Leaves() for _, node := range leaves { // test that the data returned by collectLeavesByNamespace for nid // matches the commonNamespaceData that was copied across almost all data - assert.Equal(t, commonNamespaceData, node.RawData()[NamespaceSize:]) + assert.Equal(t, commonNamespaceData, share.GetData(node.RawData())) } } } func TestGetSharesWithProofsByNamespace(t *testing.T) { - ctx, cancel := 
context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) t.Cleanup(cancel) bServ := mdutils.Bserv() var tests = []struct { - rawData []Share + rawData []share.Share }{ - {rawData: RandShares(t, 4)}, - {rawData: RandShares(t, 16)}, - {rawData: RandShares(t, 64)}, + {rawData: sharetest.RandShares(t, 4)}, + {rawData: sharetest.RandShares(t, 16)}, + {rawData: sharetest.RandShares(t, 64)}, } for i, tt := range tests { @@ -341,9 +354,9 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { } expected := tt.rawData[from] - nID := expected[:NamespaceSize] + namespace := share.GetNamespace(expected) - // change rawData to contain several shares with same nID + // change rawData to contain several shares with same namespace for i := from; i <= to; i++ { tt.rawData[i] = expected } @@ -352,10 +365,16 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { eds, err := AddShares(ctx, tt.rawData, bServ) require.NoError(t, err) - var shares []Share - for _, row := range eds.RowRoots() { - rcid := ipld.MustCidFromNamespacedSha256(row) - rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, nID, len(eds.RowRoots())) + var shares []share.Share + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + if namespace.IsOutsideRange(row, row) { + require.ErrorIs(t, err, ErrNamespaceOutsideRange) + continue + } require.NoError(t, err) if len(rowShares) > 0 { require.NotNil(t, proof) @@ -364,24 +383,24 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { // construct nodes from shares by prepending namespace var leaves [][]byte - for _, sh := range rowShares { - leaves = append(leaves, append(sh[:NamespaceSize], sh...)) + for _, shr := range rowShares { + leaves = append(leaves, append(share.GetNamespace(shr), shr...)) } // verify namespace verified := proof.VerifyNamespace( sha256.New(), - nID, + namespace.ToNMT(), leaves, - ipld.NamespacedSha256FromCID(rcid)) + NamespacedSha256FromCID(rcid)) require.True(t, verified) // verify inclusion verified = proof.VerifyInclusion( sha256.New(), - nID, + namespace.ToNMT(), rowShares, - ipld.NamespacedSha256FromCID(rcid)) + NamespacedSha256FromCID(rcid)) require.True(t, verified) } } @@ -405,7 +424,7 @@ func TestBatchSize(t *testing.T) { {"8", 8}, {"16", 16}, {"32", 32}, - // {"64", 64}, // test case too large for CI with race detector + {"64", 64}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -414,8 +433,8 @@ func TestBatchSize(t *testing.T) { bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - eds := RandEDS(t, tt.origWidth) - _, err := AddShares(ctx, ExtractODS(eds), blockservice.New(bs, offline.Exchange(bs))) + randEds := edstest.RandEDS(t, tt.origWidth) + _, err := AddShares(ctx, randEds.FlattenedODS(), blockservice.New(bs, offline.Exchange(bs))) require.NoError(t, err) out, err := bs.AllKeysChan(ctx) @@ -426,30 +445,65 @@ func TestBatchSize(t *testing.T) { count++ } extendedWidth := tt.origWidth * 2 - assert.Equalf(t, count, ipld.BatchSize(extendedWidth), "batchSize(%v)", extendedWidth) + assert.Equalf(t, count, BatchSize(extendedWidth), "batchSize(%v)", extendedWidth) }) } } func assertNoRowContainsNID( + ctx context.Context, t *testing.T, bServ blockservice.BlockService, eds *rsmt2d.ExtendedDataSquare, - nID namespace.ID, + namespace share.Namespace, + isAbsent bool, ) { - 
rowRootCount := len(eds.RowRoots()) + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + rowRootCount := len(rowRoots) // get all row root cids rowRootCIDs := make([]cid.Cid, rowRootCount) - for i, rowRoot := range eds.RowRoots() { - rowRootCIDs[i] = ipld.MustCidFromNamespacedSha256(rowRoot) + for i, rowRoot := range rowRoots { + rowRootCIDs[i] = MustCidFromNamespacedSha256(rowRoot) } - // for each row root cid check if the minNID exists - for _, rowCID := range rowRootCIDs { - data := ipld.NewNamespaceData(rowRootCount, nID, ipld.WithProofs()) - err := data.CollectLeavesByNamespace(context.Background(), bServ, rowCID) - leaves := data.Leaves() - assert.Nil(t, leaves) - assert.Nil(t, err) + // for each row root cid check if the min namespace exists + var absentCount, foundAbsenceRows int + for _, rowRoot := range rowRoots { + var outsideRange bool + if !namespace.IsOutsideRange(rowRoot, rowRoot) { + // namespace does belong to namespace range of the row + absentCount++ + } else { + outsideRange = true + } + data := NewNamespaceData(rowRootCount, namespace, WithProofs()) + rootCID := MustCidFromNamespacedSha256(rowRoot) + err := data.CollectLeavesByNamespace(ctx, bServ, rootCID) + if outsideRange { + require.ErrorIs(t, err, ErrNamespaceOutsideRange) + continue + } + require.NoError(t, err) + + // if no error returned, check absence proof + foundAbsenceRows++ + verified := data.Proof().VerifyNamespace(sha256.New(), namespace.ToNMT(), nil, rowRoot) + require.True(t, verified) + } + + if isAbsent { + require.Equal(t, foundAbsenceRows, absentCount) + // there should be max 1 row that has namespace range containing namespace + require.LessOrEqual(t, absentCount, 1) + } +} + +func randomNamespaces(total int) ([]share.Namespace, error) { + namespaces := make([]share.Namespace, total) + for i := range namespaces { + namespaces[i] = sharetest.RandV0Namespace() } + sort.Slice(namespaces, func(i, j int) bool { return bytes.Compare(namespaces[i], namespaces[j]) < 0 }) + return namespaces, nil } diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go index 13be26c06a..38dfdb2169 100644 --- a/share/ipld/namespace_data.go +++ b/share/ipld/namespace_data.go @@ -2,7 +2,6 @@ package ipld import ( "context" - "encoding/hex" "errors" "fmt" "sync" @@ -15,9 +14,13 @@ import ( "go.opentelemetry.io/otel/codes" "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/share" ) +var ErrNamespaceOutsideRange = errors.New("share/ipld: " + + "target namespace is outside of namespace range for the given root") + // Option is the functional option that is applied to the NamespaceData instance // to configure data that needs to be stored. type Option func(*NamespaceData) @@ -40,21 +43,25 @@ func WithProofs() Option { // NamespaceData stores all leaves under the given namespace with their corresponding proofs. 
type NamespaceData struct { - leaves []ipld.Node - proofs *proofCollector + leaves []ipld.Node + proofs *proofCollector + bounds fetchedBounds maxShares int - nID namespace.ID + namespace share.Namespace + + isAbsentNamespace atomic.Bool + absenceProofLeaf ipld.Node } -func NewNamespaceData(maxShares int, nID namespace.ID, options ...Option) *NamespaceData { +func NewNamespaceData(maxShares int, namespace share.Namespace, options ...Option) *NamespaceData { data := &NamespaceData{ // we don't know where in the tree the leaves in the namespace are, // so we keep track of the bounds to return the correct slice // maxShares acts as a sentinel to know if we find any leaves bounds: fetchedBounds{int64(maxShares), 0}, maxShares: maxShares, - nID: nID, + namespace: namespace, } for _, opt := range options { @@ -63,14 +70,19 @@ func NewNamespaceData(maxShares int, nID namespace.ID, options ...Option) *Names return data } -func (n *NamespaceData) validate() error { - if len(n.nID) != NamespaceSize { - return fmt.Errorf("expected namespace ID of size %d, got %d", NamespaceSize, len(n.nID)) +func (n *NamespaceData) validate(rootCid cid.Cid) error { + if err := n.namespace.Validate(); err != nil { + return err } if n.leaves == nil && n.proofs == nil { return errors.New("share/ipld: empty NamespaceData, nothing specified to retrieve") } + + root := NamespacedSha256FromCID(rootCid) + if n.namespace.IsOutsideRange(root, root) { + return ErrNamespaceOutsideRange + } return nil } @@ -78,6 +90,14 @@ func (n *NamespaceData) addLeaf(pos int, nd ipld.Node) { // bounds will be needed in `Proof` method n.bounds.update(int64(pos)) + if n.isAbsentNamespace.Load() { + if n.absenceProofLeaf != nil { + log.Fatal("there should be only one absence leaf") + } + n.absenceProofLeaf = nd + return + } + if n.leaves == nil { return } @@ -117,7 +137,7 @@ func (n *NamespaceData) addProof(d direction, cid cid.Cid, depth int) { // Leaves returns retrieved leaves within the bounds in case `WithLeaves` option was passed, // otherwise nil will be returned. func (n *NamespaceData) Leaves() []ipld.Node { - if n.leaves == nil || n.noLeaves() { + if n.leaves == nil || n.noLeaves() || n.isAbsentNamespace.Load() { return nil } return n.leaves[n.bounds.lowest : n.bounds.highest+1] @@ -140,6 +160,16 @@ func (n *NamespaceData) Proof() *nmt.Proof { nodes[i] = NamespacedSha256FromCID(node) } + if n.isAbsentNamespace.Load() { + proof := nmt.NewAbsenceProof( + int(n.bounds.lowest), + int(n.bounds.highest)+1, + nodes, + NamespacedSha256FromCID(n.absenceProofLeaf.Cid()), + NMTIgnoreMaxNamespace, + ) + return &proof + } proof := nmt.NewInclusionProof( int(n.bounds.lowest), int(n.bounds.highest)+1, @@ -150,7 +180,7 @@ func (n *NamespaceData) Proof() *nmt.Proof { } // CollectLeavesByNamespace collects leaves and corresponding proof that could be used to verify -// leaves inclusion. It returns as many leaves from the given root with the given namespace.ID as +// leaves inclusion. It returns as many leaves from the given root with the given Namespace as // it can retrieve. If no shares are found, it returns error as nil. A // non-nil error means that only partial data is returned, because at least one share retrieval // failed. The following implementation is based on `GetShares`. 
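
The tests above all follow the same per-row pattern against the reworked `NamespaceData`: build it for the target `share.Namespace`, collect leaves and/or proofs from a row root, and treat `ErrNamespaceOutsideRange` as "this row cannot contain the namespace" rather than as a failure. A minimal sketch of that flow, assuming it sits in the `share/ipld` package alongside the code above (so the unqualified identifiers resolve); `collectByNamespace` is a hypothetical helper name, and `bGetter`, `rowRoots` and `namespace` are presumed to come from an EDS previously stored via `AddShares`, as in the tests:

package ipld

import (
	"context"
	"errors"

	"github.com/ipfs/go-blockservice"

	"github.com/celestiaorg/celestia-node/share"
)

// collectByNamespace illustrates the per-row call pattern used by the tests:
// one NamespaceData per row, skipping rows whose namespace range cannot
// contain the target namespace.
func collectByNamespace(
	ctx context.Context,
	bGetter blockservice.BlockGetter,
	rowRoots [][]byte,
	namespace share.Namespace,
) error {
	for _, root := range rowRoots {
		data := NewNamespaceData(len(rowRoots), namespace, WithLeaves(), WithProofs())
		err := data.CollectLeavesByNamespace(ctx, bGetter, MustCidFromNamespacedSha256(root))
		if errors.Is(err, ErrNamespaceOutsideRange) {
			// The target namespace is outside this row's min/max range; nothing to collect.
			continue
		}
		if err != nil {
			return err
		}
		// Leaves() is nil when only an absence proof was collected; Proof()
		// returns either an inclusion or an absence proof accordingly.
		_ = data.Leaves()
		_ = data.Proof()
	}
	return nil
}

Skipping on `ErrNamespaceOutsideRange` rather than failing is what lets callers distinguish "this row cannot hold the namespace" from a genuine retrieval error, which is reported through a non-nil error as described in the doc comment above.
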
@@ -159,7 +189,7 @@ func (n *NamespaceData) CollectLeavesByNamespace( bGetter blockservice.BlockGetter, root cid.Cid, ) error { - if err := n.validate(); err != nil { + if err := n.validate(root); err != nil { return err } @@ -167,14 +197,13 @@ func (n *NamespaceData) CollectLeavesByNamespace( defer span.End() span.SetAttributes( - attribute.String("namespace", hex.EncodeToString(n.nID)), - attribute.String("root", root.String()), + attribute.String("namespace", n.namespace.String()), ) // buffer the jobs to avoid blocking, we only need as many // queued as the number of shares in the second-to-last layer - jobs := make(chan *job, (n.maxShares+1)/2) - jobs <- &job{id: root, ctx: ctx} + jobs := make(chan job, (n.maxShares+1)/2) + jobs <- job{cid: root, ctx: ctx} var wg chanGroup wg.jobs = jobs @@ -186,7 +215,7 @@ func (n *NamespaceData) CollectLeavesByNamespace( ) for { - var j *job + var j job var ok bool select { case j, ok = <-jobs: @@ -203,19 +232,19 @@ func (n *NamespaceData) CollectLeavesByNamespace( defer wg.done() span.SetAttributes( - attribute.String("cid", j.id.String()), + attribute.String("cid", j.cid.String()), attribute.Int("pos", j.sharePos), ) // if an error is likely to be returned or not depends on // the underlying impl of the blockservice, currently it is not a realistic probability - nd, err := GetNode(ctx, bGetter, j.id) + nd, err := GetNode(ctx, bGetter, j.cid) if err != nil { singleErr.Do(func() { retrievalErr = err }) log.Errorw("could not retrieve IPLD node", - "nID", hex.EncodeToString(n.nID), + "namespace", n.namespace.String(), "pos", j.sharePos, "err", err, ) @@ -235,38 +264,11 @@ func (n *NamespaceData) CollectLeavesByNamespace( } // this node has links in the namespace, so keep walking - for i, lnk := range links { - newJob := &job{ - id: lnk.Cid, - // sharePos represents potential share position in share slice - sharePos: j.sharePos*2 + i, - // depth represents the number of edges present in path from the root node of a tree to that node - depth: j.depth + 1, - // we pass the context to job so that spans are tracked in a tree - // structure - ctx: ctx, - } - // if the link's nID isn't in range we don't need to create a new job for it, - // but need to collect a proof - jobNid := NamespacedSha256FromCID(newJob.id) - - // proof is on the right side, if the nID is less than min namespace of jobNid - if n.nID.Less(nmt.MinNamespace(jobNid, n.nID.Size())) { - n.addProof(right, lnk.Cid, newJob.depth) - continue - } - - // proof is on the left side, if the nID is bigger than max namespace of jobNid - if !n.nID.LessOrEqual(nmt.MaxNamespace(jobNid, n.nID.Size())) { - n.addProof(left, lnk.Cid, newJob.depth) - continue - } - - // by passing the previous check, we know we will have one more node to process - // note: it is important to increase the counter before sending to the channel + newJobs := n.traverseLinks(j, links) + for _, j := range newJobs { wg.add(1) select { - case jobs <- newJob: + case jobs <- j: case <-ctx.Done(): return } @@ -275,6 +277,57 @@ func (n *NamespaceData) CollectLeavesByNamespace( } } +func (n *NamespaceData) traverseLinks(j job, links []*ipld.Link) []job { + if j.isAbsent { + return n.collectAbsenceProofs(j, links) + } + return n.collectNDWithProofs(j, links) +} + +func (n *NamespaceData) collectAbsenceProofs(j job, links []*ipld.Link) []job { + leftLink := links[0].Cid + rightLink := links[1].Cid + // traverse to the left node, while collecting right node as proof + n.addProof(right, rightLink, j.depth) + return []job{j.next(left, 
leftLink, j.isAbsent)} +} + +func (n *NamespaceData) collectNDWithProofs(j job, links []*ipld.Link) []job { + leftCid := links[0].Cid + rightCid := links[1].Cid + leftLink := NamespacedSha256FromCID(leftCid) + rightLink := NamespacedSha256FromCID(rightCid) + + var nextJobs []job + // check if target namespace is outside of boundaries of both links + if n.namespace.IsOutsideRange(leftLink, rightLink) { + log.Fatalf("target namespace outside of boundaries of links at depth: %v", j.depth) + } + + if !n.namespace.IsAboveMax(leftLink) { + // namespace is within the range of left link + nextJobs = append(nextJobs, j.next(left, leftCid, false)) + } else { + // proof is on the left side, if the namespace is on the right side of the range of left link + n.addProof(left, leftCid, j.depth) + if n.namespace.IsBelowMin(rightLink) { + // namespace is not included in either links, convert to absence collector + n.isAbsentNamespace.Store(true) + nextJobs = append(nextJobs, j.next(right, rightCid, true)) + return nextJobs + } + } + + if !n.namespace.IsBelowMin(rightLink) { + // namespace is within the range of right link + nextJobs = append(nextJobs, j.next(right, rightCid, false)) + } else { + // proof is on the right side, if the namespace is on the left side of the range of right link + n.addProof(right, rightCid, j.depth) + } + return nextJobs +} + type fetchedBounds struct { lowest int64 highest int64 diff --git a/share/ipld/nmt.go b/share/ipld/nmt.go index 3923260555..e7d6b4b513 100644 --- a/share/ipld/nmt.go +++ b/share/ipld/nmt.go @@ -8,10 +8,10 @@ import ( "hash" "math/rand" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - blocks "github.com/ipfs/go-libipfs/blocks" logging "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" mhcore "github.com/multiformats/go-multihash/core" @@ -20,6 +20,8 @@ import ( "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" ) var ( @@ -38,17 +40,14 @@ const ( // that contain an NMT node (inner and leaf nodes). sha256NamespaceFlagged = 0x7701 - // NamespaceSize is a system-wide size for NMT namespaces. - NamespaceSize = appconsts.NamespaceSize - // NmtHashSize is the size of a digest created by an NMT in bytes. - NmtHashSize = 2*NamespaceSize + sha256.Size + NmtHashSize = 2*share.NamespaceSize + sha256.Size // innerNodeSize is the size of data in inner nodes. innerNodeSize = NmtHashSize * 2 // leafNodeSize is the size of data in leaf nodes. - leafNodeSize = NamespaceSize + appconsts.ShareSize + leafNodeSize = share.NamespaceSize + appconsts.ShareSize // cidPrefixSize is the size of the prepended buffer of the CID encoding // for NamespacedSha256. For more information, see: @@ -56,21 +55,15 @@ const ( cidPrefixSize = 4 // NMTIgnoreMaxNamespace is currently used value for IgnoreMaxNamespace option in NMT. - // IgnoreMaxNamespace defines whether the largest possible namespace.ID MAX_NID should be 'ignored'. + // IgnoreMaxNamespace defines whether the largest possible Namespace MAX_NID should be 'ignored'. // If set to true, this allows for shorter proofs in particular use-cases. NMTIgnoreMaxNamespace = true ) -var ( - // MaxSquareSize is currently the maximum size supported for unerasured data in - // rsmt2d.ExtendedDataSquare. 
- MaxSquareSize = appconsts.SquareSizeUpperBound(appconsts.LatestVersion) -) - func init() { // required for Bitswap to hash and verify inbound data correctly mhcore.Register(sha256NamespaceFlagged, func() hash.Hash { - nh := nmt.NewNmtHasher(sha256.New(), NamespaceSize, true) + nh := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, true) nh.Reset() return nh }) diff --git a/share/ipld/nmt_adder.go b/share/ipld/nmt_adder.go index 386a0083a3..d090c679d9 100644 --- a/share/ipld/nmt_adder.go +++ b/share/ipld/nmt_adder.go @@ -9,6 +9,14 @@ import ( "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-merkledag" + + "github.com/celestiaorg/nmt" +) + +type ctxKey int + +const ( + proofsAdderKey ctxKey = iota ) // NmtNodeAdder adds ipld.Nodes to the underlying ipld.Batch if it is inserted @@ -55,26 +63,6 @@ func (n *NmtNodeAdder) Visit(hash []byte, children ...[]byte) { } } -// VisitInnerNodes is a NodeVisitor that does not store leaf nodes to the blockservice. -func (n *NmtNodeAdder) VisitInnerNodes(hash []byte, children ...[]byte) { - n.lock.Lock() - defer n.lock.Unlock() - - if n.err != nil { - return // protect from further visits if there is an error - } - - id := MustCidFromNamespacedSha256(hash) - switch len(children) { - case 1: - break - case 2: - n.err = n.add.Add(n.ctx, newNMTNode(id, append(children[0], children[1]...))) - default: - panic("expected a binary tree") - } -} - // Commit checks for errors happened during Visit and if absent commits data to inner Batch. func (n *NmtNodeAdder) Commit() error { n.lock.Lock() @@ -112,3 +100,96 @@ func BatchSize(squareSize int) int { // and for the last two layers as well: return (squareSize*2-1)*squareSize*2 - (squareSize * squareSize) } + +// ProofsAdder is used to collect proof nodes, while traversing merkle tree +type ProofsAdder struct { + lock sync.RWMutex + proofs map[cid.Cid][]byte +} + +// NewProofsAdder creates new instance of ProofsAdder. +func NewProofsAdder(squareSize int) *ProofsAdder { + return &ProofsAdder{ + // preallocate map to fit all inner nodes for given square size + proofs: make(map[cid.Cid][]byte, innerNodesAmount(squareSize)), + } +} + +// CtxWithProofsAdder creates context, that will contain ProofsAdder. If context is leaked to +// another go-routine, proofs will be not collected by gc. To prevent it, use Purge after Proofs +// are collected from adder, to preemptively release memory allocated for proofs. +func CtxWithProofsAdder(ctx context.Context, adder *ProofsAdder) context.Context { + return context.WithValue(ctx, proofsAdderKey, adder) +} + +// ProofsAdderFromCtx extracts ProofsAdder from context +func ProofsAdderFromCtx(ctx context.Context) *ProofsAdder { + val := ctx.Value(proofsAdderKey) + adder, ok := val.(*ProofsAdder) + if !ok || adder == nil { + return nil + } + return adder +} + +// Proofs returns proofs collected by ProofsAdder +func (a *ProofsAdder) Proofs() map[cid.Cid][]byte { + if a == nil { + return nil + } + + a.lock.RLock() + defer a.lock.RUnlock() + return a.proofs +} + +// VisitFn returns NodeVisitorFn, that will collect proof nodes while traversing merkle tree. 
+func (a *ProofsAdder) VisitFn() nmt.NodeVisitorFn { + if a == nil { + return nil + } + + a.lock.RLock() + defer a.lock.RUnlock() + + // proofs are already collected, don't collect second time + if len(a.proofs) > 0 { + return nil + } + return a.visitInnerNodes +} + +// Purge removed proofs from ProofsAdder allowing GC to collect the memory +func (a *ProofsAdder) Purge() { + if a == nil { + return + } + + a.lock.Lock() + defer a.lock.Unlock() + + a.proofs = nil +} + +func (a *ProofsAdder) visitInnerNodes(hash []byte, children ...[]byte) { + switch len(children) { + case 1: + break + case 2: + id := MustCidFromNamespacedSha256(hash) + a.addProof(id, append(children[0], children[1]...)) + default: + panic("expected a binary tree") + } +} + +func (a *ProofsAdder) addProof(id cid.Cid, proof []byte) { + a.lock.Lock() + defer a.lock.Unlock() + a.proofs[id] = proof +} + +// innerNodesAmount return amount of inner nodes in eds with given size +func innerNodesAmount(squareSize int) int { + return 2 * (squareSize - 1) * squareSize +} diff --git a/share/ipld/nmt_test.go b/share/ipld/nmt_test.go index b52a75c150..77268d7112 100644 --- a/share/ipld/nmt_test.go +++ b/share/ipld/nmt_test.go @@ -1,36 +1,33 @@ package ipld import ( - "bytes" - "crypto/rand" - "sort" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" ) // TestNamespaceFromCID checks that deriving the Namespaced hash from // the given CID works correctly. func TestNamespaceFromCID(t *testing.T) { var tests = []struct { - randData [][]byte + eds *rsmt2d.ExtendedDataSquare }{ // note that the number of shares must be a power of two - {randData: generateRandNamespacedRawData(4, appconsts.NamespaceSize, appconsts.ShareSize-appconsts.NamespaceSize)}, - {randData: generateRandNamespacedRawData(16, appconsts.NamespaceSize, appconsts.ShareSize-appconsts.NamespaceSize)}, + {eds: edstest.RandEDS(t, 4)}, + {eds: edstest.RandEDS(t, 16)}, } for i, tt := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - // create DAH from rand data - eds, err := da.ExtendShares(tt.randData) + dah, err := da.NewDataAvailabilityHeader(tt.eds) require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(eds) // check to make sure NamespacedHash is correctly derived from CID for _, row := range dah.RowRoots { c, err := CidFromNamespacedSha256(row) @@ -42,28 +39,3 @@ func TestNamespaceFromCID(t *testing.T) { }) } } - -// generateRandNamespacedRawData returns random namespaced raw data for testing -// purposes. Note that this does not check that total is a power of two. -func generateRandNamespacedRawData(total, nidSize, leafSize uint32) [][]byte { - data := make([][]byte, total) - for i := uint32(0); i < total; i++ { - nid := make([]byte, nidSize) - - _, _ = rand.Read(nid) - data[i] = nid - } - sortByteArrays(data) - for i := uint32(0); i < total; i++ { - d := make([]byte, leafSize) - - _, _ = rand.Read(d) - data[i] = append(data[i], d...) 
- } - - return data -} - -func sortByteArrays(src [][]byte) { - sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i], src[j]) < 0 }) -} diff --git a/share/mocks/getter.go b/share/mocks/getter.go index 1c73c9170d..2a1b84efd5 100644 --- a/share/mocks/getter.go +++ b/share/mocks/getter.go @@ -8,11 +8,11 @@ import ( context "context" reflect "reflect" + gomock "github.com/golang/mock/gomock" + da "github.com/celestiaorg/celestia-app/pkg/da" share "github.com/celestiaorg/celestia-node/share" - namespace "github.com/celestiaorg/nmt/namespace" rsmt2d "github.com/celestiaorg/rsmt2d" - gomock "github.com/golang/mock/gomock" ) // MockGetter is a mock of Getter interface. @@ -69,7 +69,7 @@ func (mr *MockGetterMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) * } // GetSharesByNamespace mocks base method. -func (m *MockGetter) GetSharesByNamespace(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2 namespace.ID) (share.NamespacedShares, error) { +func (m *MockGetter) GetSharesByNamespace(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2 share.Namespace) (share.NamespacedShares, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) ret0, _ := ret[0].(share.NamespacedShares) diff --git a/share/namespace.go b/share/namespace.go new file mode 100644 index 0000000000..df4ad74058 --- /dev/null +++ b/share/namespace.go @@ -0,0 +1,184 @@ +package share + +import ( + "bytes" + "encoding/hex" + "fmt" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/nmt/namespace" +) + +// NamespaceSize is a system-wide size for NMT namespaces. +const NamespaceSize = appns.NamespaceSize + +// Various reserved namespaces. +var ( + // MaxPrimaryReservedNamespace is the highest primary reserved namespace. + // Namespaces lower than this are reserved for protocol use. + MaxPrimaryReservedNamespace = Namespace(appns.MaxPrimaryReservedNamespace.Bytes()) + // MinSecondaryReservedNamespace is the lowest secondary reserved namespace + // reserved for protocol use. Namespaces higher than this are reserved for + // protocol use. + MinSecondaryReservedNamespace = Namespace(appns.MinSecondaryReservedNamespace.Bytes()) + ParitySharesNamespace = Namespace(appns.ParitySharesNamespace.Bytes()) + TailPaddingNamespace = Namespace(appns.TailPaddingNamespace.Bytes()) + PrimaryReservedPaddingNamespace = Namespace(appns.PrimaryReservedPaddingNamespace.Bytes()) + TxNamespace = Namespace(appns.TxNamespace.Bytes()) + PayForBlobNamespace = Namespace(appns.PayForBlobNamespace.Bytes()) + ISRNamespace = Namespace(appns.IntermediateStateRootsNamespace.Bytes()) +) + +// Namespace represents namespace of a Share. +// Consists of version byte and namespace ID. +type Namespace []byte + +// NewBlobNamespaceV0 takes a variable size byte slice and creates a valid version 0 Blob Namespace. +// The byte slice must be <= 10 bytes. +// If it is less than 10 bytes, it will be left padded to size 10 with 0s. +// Use predefined namespaces above, if non-blob namespace is needed. 
+func NewBlobNamespaceV0(id []byte) (Namespace, error) { + if len(id) == 0 || len(id) > appns.NamespaceVersionZeroIDSize { + return nil, fmt.Errorf( + "namespace id must be > 0 && <= %d, but it was %d bytes", appns.NamespaceVersionZeroIDSize, len(id)) + } + + n := make(Namespace, NamespaceSize) + // version and zero padding are already set as zero, + // so simply copying subNID to the end is enough to comply the V0 spec + copy(n[len(n)-len(id):], id) + return n, n.ValidateForBlob() +} + +// NamespaceFromBytes converts bytes into Namespace and validates it. +func NamespaceFromBytes(b []byte) (Namespace, error) { + n := Namespace(b) + return n, n.Validate() +} + +// Version reports version of the Namespace. +func (n Namespace) Version() byte { + return n[appns.NamespaceVersionSize-1] +} + +// ID reports ID of the Namespace. +func (n Namespace) ID() namespace.ID { + return namespace.ID(n[appns.NamespaceVersionSize:]) +} + +// ToNMT converts the whole Namespace(both Version and ID parts) into NMT's namespace.ID +// NOTE: Once https://github.com/celestiaorg/nmt/issues/206 is closed Namespace should become NNT's +// type. +func (n Namespace) ToNMT() namespace.ID { + return namespace.ID(n) +} + +// ToAppNamespace converts the Namespace to App's definition of Namespace. +// TODO: Unify types between node and app +func (n Namespace) ToAppNamespace() appns.Namespace { + return appns.Namespace{Version: n.Version(), ID: n.ID()} +} + +// Len reports the total length of the namespace. +func (n Namespace) Len() int { + return len(n) +} + +// String stringifies the Namespace. +func (n Namespace) String() string { + return hex.EncodeToString(n) +} + +// Equals compares two Namespaces. +func (n Namespace) Equals(target Namespace) bool { + return bytes.Equal(n, target) +} + +// Validate checks if the namespace is correct. +func (n Namespace) Validate() error { + if n.Len() != NamespaceSize { + return fmt.Errorf("invalid namespace length: expected %d, got %d", NamespaceSize, n.Len()) + } + if n.Version() != appns.NamespaceVersionZero && n.Version() != appns.NamespaceVersionMax { + return fmt.Errorf("invalid namespace version %v", n.Version()) + } + if len(n.ID()) != appns.NamespaceIDSize { + return fmt.Errorf("invalid namespace id length: expected %d, got %d", appns.NamespaceIDSize, n.ID().Size()) + } + if n.Version() == appns.NamespaceVersionZero && !bytes.HasPrefix(n.ID(), appns.NamespaceVersionZeroPrefix) { + return fmt.Errorf("invalid namespace id: expect %d leading zeroes", len(appns.NamespaceVersionZeroPrefix)) + } + return nil +} + +// ValidateForData checks if the Namespace is of real/useful data. +func (n Namespace) ValidateForData() error { + if err := n.Validate(); err != nil { + return err + } + if n.Equals(ParitySharesNamespace) || n.Equals(TailPaddingNamespace) { + return fmt.Errorf("invalid data namespace(%s): parity and tail padding namespace are forbidden", n) + } + if n.Version() != appns.NamespaceVersionZero { + return fmt.Errorf("invalid data namespace(%s): only version 0 is supported", n) + } + return nil +} + +// ValidateForBlob checks if the Namespace is valid blob namespace. 
+func (n Namespace) ValidateForBlob() error { + if err := n.ValidateForData(); err != nil { + return err + } + if bytes.Compare(n, MaxPrimaryReservedNamespace) < 1 { + return fmt.Errorf("invalid blob namespace(%s): reserved namespaces are forbidden", n) + } + if bytes.Compare(n, MinSecondaryReservedNamespace) > -1 { + return fmt.Errorf("invalid blob namespace(%s): reserved namespaces are forbidden", n) + } + return nil +} + +// IsAboveMax checks if the namespace is above the maximum namespace of the given hash. +func (n Namespace) IsAboveMax(nodeHash []byte) bool { + return !n.IsLessOrEqual(nodeHash[n.Len() : n.Len()*2]) +} + +// IsBelowMin checks if the target namespace is below the minimum namespace of the given hash. +func (n Namespace) IsBelowMin(nodeHash []byte) bool { + return n.IsLess(nodeHash[:n.Len()]) +} + +// IsOutsideRange checks if the namespace is outside the min-max range of the given hashes. +func (n Namespace) IsOutsideRange(leftNodeHash, rightNodeHash []byte) bool { + return n.IsBelowMin(leftNodeHash) || n.IsAboveMax(rightNodeHash) +} + +// Repeat copies the Namespace t times. +func (n Namespace) Repeat(t int) []Namespace { + ns := make([]Namespace, t) + for i := 0; i < t; i++ { + ns[i] = n + } + return ns +} + +// IsLess reports if the Namespace is less than the target. +func (n Namespace) IsLess(target Namespace) bool { + return bytes.Compare(n, target) == -1 +} + +// IsLessOrEqual reports if the Namespace is less than the target. +func (n Namespace) IsLessOrEqual(target Namespace) bool { + return bytes.Compare(n, target) < 1 +} + +// IsGreater reports if the Namespace is greater than the target. +func (n Namespace) IsGreater(target Namespace) bool { + return bytes.Compare(n, target) == 1 +} + +// IsGreaterOrEqualThan reports if the Namespace is greater or equal than the target. +func (n Namespace) IsGreaterOrEqualThan(target Namespace) bool { + return bytes.Compare(n, target) > -1 +} diff --git a/share/namespace_test.go b/share/namespace_test.go new file mode 100644 index 0000000000..786441b043 --- /dev/null +++ b/share/namespace_test.go @@ -0,0 +1,216 @@ +package share + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +var ( + validID = append( + appns.NamespaceVersionZeroPrefix, + bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)..., + ) + tooShortID = append(appns.NamespaceVersionZeroPrefix, []byte{1}...) + tooLongID = append(appns.NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize)...) 
+ invalidPrefixID = bytes.Repeat([]byte{1}, NamespaceSize) +) + +func TestNewNamespaceV0(t *testing.T) { + type testCase struct { + name string + subNid []byte + expected Namespace + wantErr bool + } + testCases := []testCase{ + { + name: "8 byte id, gets left padded", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + expected: Namespace{ + 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, // id with left padding + wantErr: false, + }, + { + name: "10 byte id, no padding", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10}, + expected: Namespace{ + 0x0, // version + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}, // id + wantErr: false, + }, + { + name: "11 byte id", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10, 0x11}, + expected: []byte{}, + wantErr: true, + }, + { + name: "nil id", + subNid: nil, + expected: []byte{}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NewBlobNamespaceV0(tc.subNid) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expected, got) + }) + } +} + +func TestFrom(t *testing.T) { + type testCase struct { + name string + bytes []byte + wantErr bool + want Namespace + } + validNamespace := []byte{} + validNamespace = append(validNamespace, appns.NamespaceVersionZero) + validNamespace = append(validNamespace, appns.NamespaceVersionZeroPrefix...) + validNamespace = append(validNamespace, bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize)...) 
+ parityNamespace := bytes.Repeat([]byte{0xFF}, NamespaceSize) + + testCases := []testCase{ + { + name: "valid namespace", + bytes: validNamespace, + wantErr: false, + want: append([]byte{appns.NamespaceVersionZero}, validID...), + }, + { + name: "parity namespace", + bytes: parityNamespace, + wantErr: false, + want: append([]byte{appns.NamespaceVersionMax}, bytes.Repeat([]byte{0xFF}, appns.NamespaceIDSize)...), + }, + { + name: "unsupported version", + bytes: append([]byte{1}, append( + appns.NamespaceVersionZeroPrefix, + bytes.Repeat([]byte{1}, NamespaceSize-len(appns.NamespaceVersionZeroPrefix))..., + )...), + wantErr: true, + }, + { + name: "unsupported id: too short", + bytes: append([]byte{appns.NamespaceVersionZero}, tooShortID...), + wantErr: true, + }, + { + name: "unsupported id: too long", + bytes: append([]byte{appns.NamespaceVersionZero}, tooLongID...), + wantErr: true, + }, + { + name: "unsupported id: invalid prefix", + bytes: append([]byte{appns.NamespaceVersionZero}, invalidPrefixID...), + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NamespaceFromBytes(tc.bytes) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestValidateForBlob(t *testing.T) { + type testCase struct { + name string + ns Namespace + wantErr bool + } + + validNamespace, err := NewBlobNamespaceV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize)) + require.NoError(t, err) + + testCases := []testCase{ + { + name: "valid blob namespace", + ns: validNamespace, + wantErr: false, + }, + { + name: "invalid blob namespace: parity shares namespace", + ns: ParitySharesNamespace, + wantErr: true, + }, + { + name: "invalid blob namespace: tail padding namespace", + ns: TailPaddingNamespace, + wantErr: true, + }, + { + name: "invalid blob namespace: tx namespace", + ns: TxNamespace, + wantErr: true, + }, + { + name: "invalid blob namespace: namespace version max", + ns: append([]byte{appns.NamespaceVersionMax}, bytes.Repeat([]byte{0x0}, appns.NamespaceIDSize)...), + wantErr: true, + }, + { + name: "invalid blob namespace: primary reserved namespace", + ns: primaryReservedNamespace(0x10), + wantErr: true, + }, + { + name: "invalid blob namespace: secondary reserved namespace", + ns: secondaryReservedNamespace(0x10), + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.ns.ValidateForBlob() + + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func primaryReservedNamespace(lastByte byte) Namespace { + result := make([]byte, NamespaceSize) + result = append(result, appns.NamespaceVersionZero) + result = append(result, appns.NamespaceVersionZeroPrefix...) + result = append(result, bytes.Repeat([]byte{0x0}, appns.NamespaceVersionZeroIDSize-1)...) + result = append(result, lastByte) + return result +} + +func secondaryReservedNamespace(lastByte byte) Namespace { + result := make([]byte, NamespaceSize) + result = append(result, appns.NamespaceVersionMax) + result = append(result, bytes.Repeat([]byte{0xFF}, appns.NamespaceIDSize-1)...) 
+ result = append(result, lastByte) + return result +} diff --git a/share/nid.go b/share/nid.go deleted file mode 100644 index b7fd4e5836..0000000000 --- a/share/nid.go +++ /dev/null @@ -1,28 +0,0 @@ -package share - -import ( - "fmt" - - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/celestiaorg/nmt/namespace" -) - -// NewNamespaceV0 takes a variable size byte slice and creates a version 0 Namespace ID. -// The byte slice must be <= 10 bytes. -// If it is less than 10 bytes, it will be left padded to size 10 with 0s. -func NewNamespaceV0(subNId []byte) (namespace.ID, error) { - if lnid := len(subNId); lnid > appns.NamespaceVersionZeroIDSize { - return nil, fmt.Errorf("namespace id must be <= %v, but it was %v bytes", appns.NamespaceVersionZeroIDSize, lnid) - } - - id := make([]byte, appns.NamespaceIDSize) - leftPaddingOffset := appns.NamespaceVersionZeroIDSize - len(subNId) - copy(id[appns.NamespaceVersionZeroPrefixSize+leftPaddingOffset:], subNId) - - appID, err := appns.New(appns.NamespaceVersionZero, id) - if err != nil { - return nil, err - } - - return appID.Bytes(), nil -} diff --git a/share/nid_test.go b/share/nid_test.go deleted file mode 100644 index 8f83d430e3..0000000000 --- a/share/nid_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package share - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/celestiaorg/nmt/namespace" -) - -func TestNewNamespaceV0(t *testing.T) { - type testCase struct { - name string - subNid []byte - expected namespace.ID - wantErr bool - } - testCases := []testCase{ - { - name: "8 byte subNid, gets left padded", - subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, - expected: namespace.ID{ - 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros - 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, // id with left padding - wantErr: false, - }, - { - name: "10 byte subNid, no padding", - subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10}, - expected: namespace.ID{ - 0x0, // version - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros - 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}, // id - wantErr: false, - }, - { - name: "11 byte subNid", - subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10, 0x11}, - expected: []byte{}, - wantErr: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got, err := NewNamespaceV0(tc.subNid) - if tc.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - assert.Equal(t, tc.expected, got) - }) - } -} diff --git a/share/p2p/discovery/discovery.go b/share/p2p/discovery/discovery.go index 96eda8bd78..f24df2c88b 100644 --- a/share/p2p/discovery/discovery.go +++ b/share/p2p/discovery/discovery.go @@ -33,6 +33,10 @@ const ( // retryTimeout defines time interval between discovery and advertise attempts. retryTimeout = time.Second + + // logInterval defines the time interval at which a warning message will be logged + // if the desired number of nodes is not detected. + logInterval = 5 * time.Minute ) // discoveryRetryTimeout defines time interval between discovery attempts, needed for tests @@ -195,26 +199,34 @@ func (d *Discovery) Advertise(ctx context.Context) { } // discoveryLoop ensures we always have '~peerLimit' connected peers. -// It starts peer discovery per request and restarts the process until the soft limit reached. 
+// It initiates peer discovery upon request and restarts the process until the soft limit is +// reached. func (d *Discovery) discoveryLoop(ctx context.Context) { t := time.NewTicker(discoveryRetryTimeout) defer t.Stop() + + warnTicker := time.NewTicker(logInterval) + defer warnTicker.Stop() + for { - // drain all previous ticks from channel + // drain all previous ticks from the channel drainChannel(t.C) select { case <-t.C: - found := d.discover(ctx) - if !found { - // rerun discovery if amount of peers didn't reach the limit + if !d.discover(ctx) { + // rerun discovery if the number of peers hasn't reached the limit continue } - case <-ctx.Done(): - return - } - - select { - case <-d.triggerDisc: + case <-warnTicker.C: + if d.set.Size() < d.set.Limit() { + log.Warnf( + "Potentially degraded connectivity, unable to discover the desired amount of full node peers in %v. "+ + "Number of peers discovered: %d. Required: %d.", + logInterval, d.set.Size(), d.set.Limit(), + ) + } + // Do not break the loop; just continue + continue case <-ctx.Done(): return } @@ -251,7 +263,10 @@ func (d *Discovery) discover(ctx context.Context) bool { log.Debugw("reached soft peer limit, skipping discovery", "size", size) return true } - log.Infow("discovering peers", "want", want) + // TODO @renaynay: eventually, have a mechanism to catch if wanted amount of peers + // has not been discovered in X amount of time so that users are warned of degraded + // FN connectivity. + log.Debugw("discovering peers", "want", want) // we use errgroup as it provide limits var wg errgroup.Group @@ -322,10 +337,6 @@ func (d *Discovery) handleDiscoveredPeer(ctx context.Context, peer peer.AddrInfo d.metrics.observeHandlePeer(ctx, handlePeerSkipSelf) logger.Debug("skip handle: self discovery") return false - case len(peer.Addrs) == 0: - d.metrics.observeHandlePeer(ctx, handlePeerEmptyAddrs) - logger.Debug("skip handle: empty address list") - return false case d.set.Size() >= d.set.Limit(): d.metrics.observeHandlePeer(ctx, handlePeerEnoughPeers) logger.Debug("skip handle: enough peers found") diff --git a/share/p2p/discovery/discovery_test.go b/share/p2p/discovery/discovery_test.go index f0935086ef..06d88a9079 100644 --- a/share/p2p/discovery/discovery_test.go +++ b/share/p2p/discovery/discovery_test.go @@ -11,6 +11,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/discovery/routing" basic "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +22,7 @@ func TestDiscovery(t *testing.T) { discoveryRetryTimeout = time.Millisecond * 100 // defined in discovery.go - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*30) t.Cleanup(cancel) tn := newTestnet(ctx, t) @@ -80,8 +81,9 @@ type testnet struct { } func newTestnet(ctx context.Context, t *testing.T) *testnet { - swarm := swarmt.GenSwarm(t, swarmt.OptDisableTCP) - hst, err := basic.NewHost(swarm, &basic.HostOpts{}) + bus := eventbus.NewBus() + swarm := swarmt.GenSwarm(t, swarmt.OptDisableTCP, swarmt.EventBus(bus)) + hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus}) require.NoError(t, err) hst.Start() @@ -110,8 +112,9 @@ func (t *testnet) discovery(opts ...Option) *Discovery { } func (t *testnet) peer() (host.Host, discovery.Discovery) { - swarm := 
swarmt.GenSwarm(t.T, swarmt.OptDisableTCP) - hst, err := basic.NewHost(swarm, &basic.HostOpts{}) + bus := eventbus.NewBus() + swarm := swarmt.GenSwarm(t.T, swarmt.OptDisableTCP, swarmt.EventBus(bus)) + hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus}) require.NoError(t.T, err) hst.Start() diff --git a/share/p2p/discovery/metrics.go b/share/p2p/discovery/metrics.go index b6adbb1984..99c9bb4548 100644 --- a/share/p2p/discovery/metrics.go +++ b/share/p2p/discovery/metrics.go @@ -5,11 +5,9 @@ import ( "fmt" "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/asyncint64" - "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/metric" ) const ( @@ -28,18 +26,18 @@ const ( ) var ( - meter = global.MeterProvider().Meter("share_discovery") + meter = otel.Meter("share_discovery") ) type handlePeerResult string type metrics struct { - peersAmount asyncint64.Gauge - discoveryResult syncint64.Counter // attributes: enough_peers[bool],is_canceled[bool] - handlePeerResult syncint64.Counter // attributes: result[string] - advertise syncint64.Counter // attributes: failed[bool] - peerAdded syncint64.Counter - peerRemoved syncint64.Counter + peersAmount metric.Int64ObservableGauge + discoveryResult metric.Int64Counter // attributes: enough_peers[bool],is_canceled[bool] + handlePeerResult metric.Int64Counter // attributes: result[string] + advertise metric.Int64Counter // attributes: failed[bool] + peerAdded metric.Int64Counter + peerRemoved metric.Int64Counter } // WithMetrics turns on metric collection in discoery. @@ -54,44 +52,44 @@ func (d *Discovery) WithMetrics() error { } func initMetrics(d *Discovery) (*metrics, error) { - peersAmount, err := meter.AsyncInt64().Gauge("discovery_amount_of_peers", - instrument.WithDescription("amount of peers in discovery set")) + peersAmount, err := meter.Int64ObservableGauge("discovery_amount_of_peers", + metric.WithDescription("amount of peers in discovery set")) if err != nil { return nil, err } - discoveryResult, err := meter.SyncInt64().Counter("discovery_find_peers_result", - instrument.WithDescription("result of find peers run")) + discoveryResult, err := meter.Int64Counter("discovery_find_peers_result", + metric.WithDescription("result of find peers run")) if err != nil { return nil, err } - handlePeerResultCounter, err := meter.SyncInt64().Counter("discovery_handler_peer_result", - instrument.WithDescription("result handling found peer")) + handlePeerResultCounter, err := meter.Int64Counter("discovery_handler_peer_result", + metric.WithDescription("result handling found peer")) if err != nil { return nil, err } - advertise, err := meter.SyncInt64().Counter("discovery_advertise_event", - instrument.WithDescription("advertise events counter")) + advertise, err := meter.Int64Counter("discovery_advertise_event", + metric.WithDescription("advertise events counter")) if err != nil { return nil, err } - peerAdded, err := meter.SyncInt64().Counter("discovery_add_peer", - instrument.WithDescription("add peer to discovery set counter")) + peerAdded, err := meter.Int64Counter("discovery_add_peer", + metric.WithDescription("add peer to discovery set counter")) if err != nil { return nil, err } - peerRemoved, err := meter.SyncInt64().Counter("discovery_remove_peer", - instrument.WithDescription("remove peer from discovery set counter")) + 
peerRemoved, err := meter.Int64Counter("discovery_remove_peer", + metric.WithDescription("remove peer from discovery set counter")) if err != nil { return nil, err } - backOffSize, err := meter.AsyncInt64().Gauge("discovery_backoff_amount", - instrument.WithDescription("amount of peers in backoff")) + backOffSize, err := meter.Int64ObservableGauge("discovery_backoff_amount", + metric.WithDescription("amount of peers in backoff")) if err != nil { return nil, err } @@ -105,16 +103,12 @@ func initMetrics(d *Discovery) (*metrics, error) { peerRemoved: peerRemoved, } - err = meter.RegisterCallback( - []instrument.Asynchronous{ - peersAmount, - backOffSize, - }, - func(ctx context.Context) { - peersAmount.Observe(ctx, int64(d.set.Size())) - backOffSize.Observe(ctx, int64(d.connector.Size())) - }, - ) + callback := func(ctx context.Context, observer metric.Observer) error { + observer.ObserveInt64(peersAmount, int64(d.set.Size())) + observer.ObserveInt64(backOffSize, int64(d.connector.Size())) + return nil + } + _, err = meter.RegisterCallback(callback, peersAmount, backOffSize) if err != nil { return nil, fmt.Errorf("registering metrics callback: %w", err) } @@ -130,7 +124,8 @@ func (m *metrics) observeFindPeers(ctx context.Context, isEnoughPeers bool) { } m.discoveryResult.Add(ctx, 1, - attribute.Bool(discoveryEnoughPeersKey, isEnoughPeers)) + metric.WithAttributes( + attribute.Bool(discoveryEnoughPeersKey, isEnoughPeers))) } func (m *metrics) observeHandlePeer(ctx context.Context, result handlePeerResult) { @@ -142,7 +137,8 @@ func (m *metrics) observeHandlePeer(ctx context.Context, result handlePeerResult } m.handlePeerResult.Add(ctx, 1, - attribute.String(handlePeerResultKey, string(result))) + metric.WithAttributes( + attribute.String(handlePeerResultKey, string(result)))) } func (m *metrics) observeAdvertise(ctx context.Context, err error) { @@ -154,7 +150,8 @@ func (m *metrics) observeAdvertise(ctx context.Context, err error) { } m.advertise.Add(ctx, 1, - attribute.Bool(advertiseFailedKey, err != nil)) + metric.WithAttributes( + attribute.Bool(advertiseFailedKey, err != nil))) } func (m *metrics) observeOnPeersUpdate(_ peer.ID, isAdded bool) { diff --git a/share/p2p/errors.go b/share/p2p/errors.go index 77f23c554e..cb7b596f47 100644 --- a/share/p2p/errors.go +++ b/share/p2p/errors.go @@ -9,6 +9,8 @@ import ( // available at the moment. The request may be retried later, but it's unlikely to succeed. var ErrNotFound = errors.New("the requested data or resource could not be found") +var ErrRateLimited = errors.New("server is overloaded and rate limited the request") + // ErrInvalidResponse is returned when a peer returns an invalid response or caused an internal // error. It is used to signal that the peer couldn't serve the data successfully, and should not be // retried. 
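
The OpenTelemetry changes above in `share/p2p/discovery/metrics.go` repeat in the metrics files that follow (`share/p2p/metrics.go`, `share/p2p/peers/metrics.go`): `global.MeterProvider()` and the `instrument` sub-packages give way to `otel.Meter` plus the unified `metric` package, attributes move into `metric.WithAttributes`, and observable gauges are reported from a single callback registered together with the instruments it observes. A condensed sketch of the new pattern, with placeholder package, instrument and attribute names that are not taken from this diff:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

var meter = otel.Meter("example_component")

func initExampleMetrics(poolSize func() int64) error {
	// Synchronous counter: attributes are wrapped in metric.WithAttributes.
	requests, err := meter.Int64Counter("example_requests_total",
		metric.WithDescription("total requests served"))
	if err != nil {
		return err
	}
	requests.Add(context.Background(), 1,
		metric.WithAttributes(attribute.String("status", "ok")))

	// Observable gauge: values are reported from a callback registered once,
	// listing every instrument the callback observes.
	gauge, err := meter.Int64ObservableGauge("example_pool_size",
		metric.WithDescription("current pool size"))
	if err != nil {
		return err
	}
	_, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, poolSize())
		return nil
	}, gauge)
	return err
}

The callback form replaces the earlier per-instrument `Observe` calls, so all gauge values for a collection cycle are produced in one place, which is the shape the peer-manager and discovery metrics below adopt.
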
diff --git a/share/p2p/metrics.go b/share/p2p/metrics.go index 87c1e2eeb0..fee3b12413 100644 --- a/share/p2p/metrics.go +++ b/share/p2p/metrics.go @@ -4,18 +4,20 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/syncint64" - "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/metric" ) -var meter = global.MeterProvider().Meter("shrex/eds") +var meter = otel.Meter("shrex/eds") type status string const ( + StatusBadRequest status = "bad_request" + StatusSendRespErr status = "send_resp_err" + StatusSendReqErr status = "send_req_err" + StatusReadRespErr status = "read_resp_err" StatusInternalErr status = "internal_err" StatusNotFound status = "not_found" StatusTimeout status = "timeout" @@ -24,7 +26,7 @@ const ( ) type Metrics struct { - totalRequestCounter syncint64.Counter + totalRequestCounter metric.Int64Counter } // ObserveRequests increments the total number of requests sent with the given status as an @@ -36,14 +38,16 @@ func (m *Metrics) ObserveRequests(ctx context.Context, count int64, status statu if ctx.Err() != nil { ctx = context.Background() } - m.totalRequestCounter.Add(ctx, count, attribute.String("status", string(status))) + m.totalRequestCounter.Add(ctx, count, + metric.WithAttributes( + attribute.String("status", string(status)), + )) } func InitClientMetrics(protocol string) (*Metrics, error) { - totalRequestCounter, err := meter.SyncInt64().Counter( + totalRequestCounter, err := meter.Int64Counter( fmt.Sprintf("shrex_%s_client_total_requests", protocol), - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription(fmt.Sprintf("Total count of sent shrex/%s requests", protocol)), + metric.WithDescription(fmt.Sprintf("Total count of sent shrex/%s requests", protocol)), ) if err != nil { return nil, err @@ -55,10 +59,9 @@ func InitClientMetrics(protocol string) (*Metrics, error) { } func InitServerMetrics(protocol string) (*Metrics, error) { - totalRequestCounter, err := meter.SyncInt64().Counter( + totalRequestCounter, err := meter.Int64Counter( fmt.Sprintf("shrex_%s_server_total_responses", protocol), - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription(fmt.Sprintf("Total count of sent shrex/%s responses", protocol)), + metric.WithDescription(fmt.Sprintf("Total count of sent shrex/%s responses", protocol)), ) if err != nil { return nil, err diff --git a/share/p2p/peers/manager.go b/share/p2p/peers/manager.go index 2a7c1fee18..87f9361ee2 100644 --- a/share/p2p/peers/manager.go +++ b/share/p2p/peers/manager.go @@ -293,7 +293,7 @@ func (m *Manager) subscribeHeader(ctx context.Context, headerSub libhead.Subscri m.validatedPool(h.DataHash.String()) // store first header for validation purposes - if m.initialHeight.CompareAndSwap(0, uint64(h.Height())) { + if m.initialHeight.CompareAndSwap(0, h.Height()) { log.Debugw("stored initial height", "height", h.Height()) } } diff --git a/share/p2p/peers/manager_test.go b/share/p2p/peers/manager_test.go index e10e820e84..ad04d2c7bd 100644 --- a/share/p2p/peers/manager_test.go +++ b/share/p2p/peers/manager_test.go @@ -274,7 +274,7 @@ func TestManager(t *testing.T) { // create shrexSub msg with height lower than first header from headerSub msg := shrexsub.Notification{ DataHash: share.DataHash("datahash"), - Height: uint64(h.Height() - 1), + Height: h.Height() - 1, } result := manager.Validate(ctx, "peer", msg) 
require.Equal(t, pubsub.ValidationIgnore, result) @@ -298,7 +298,7 @@ func TestManager(t *testing.T) { // create shrexSub msg with height lower than first header from headerSub msg := shrexsub.Notification{ DataHash: share.DataHash("datahash"), - Height: uint64(h.Height() - 1), + Height: h.Height() - 1, } result := manager.Validate(ctx, "peer", msg) require.Equal(t, pubsub.ValidationIgnore, result) @@ -537,7 +537,7 @@ func (s *subLock) Subscribe() (libhead.Subscription[*header.ExtendedHeader], err return s, nil } -func (s *subLock) AddValidator(func(context.Context, *header.ExtendedHeader) pubsub.ValidationResult) error { +func (s *subLock) SetVerifier(func(context.Context, *header.ExtendedHeader) error) error { panic("implement me") } @@ -561,6 +561,6 @@ func (s *subLock) Cancel() { func newShrexSubMsg(h *header.ExtendedHeader) shrexsub.Notification { return shrexsub.Notification{ DataHash: h.DataHash.Bytes(), - Height: uint64(h.Height()), + Height: h.Height(), } } diff --git a/share/p2p/peers/metrics.go b/share/p2p/peers/metrics.go index bf4d544d9f..95d1ce65d9 100644 --- a/share/p2p/peers/metrics.go +++ b/share/p2p/peers/metrics.go @@ -8,11 +8,9 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/asyncint64" - "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/metric" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) @@ -52,7 +50,7 @@ const ( ) var ( - meter = global.MeterProvider().Meter("shrex_peer_manager") + meter = otel.Meter("shrex_peer_manager") ) type blacklistPeerReason string @@ -64,63 +62,63 @@ type poolStatus string type peerSource string type metrics struct { - getPeer syncint64.Counter // attributes: source, is_instant - getPeerWaitTimeHistogram syncint64.Histogram // attributes: source - getPeerPoolSizeHistogram syncint64.Histogram // attributes: source - doneResult syncint64.Counter // attributes: source, done_result - validationResult syncint64.Counter // attributes: validation_result - - shrexPools asyncint64.Gauge // attributes: pool_status - fullNodesPool asyncint64.Gauge // attributes: pool_status + getPeer metric.Int64Counter // attributes: source, is_instant + getPeerWaitTimeHistogram metric.Int64Histogram // attributes: source + getPeerPoolSizeHistogram metric.Int64Histogram // attributes: source + doneResult metric.Int64Counter // attributes: source, done_result + validationResult metric.Int64Counter // attributes: validation_result + + shrexPools metric.Int64ObservableGauge // attributes: pool_status + fullNodesPool metric.Int64ObservableGauge // attributes: pool_status blacklistedPeersByReason sync.Map - blacklistedPeers asyncint64.Gauge // attributes: blacklist_reason + blacklistedPeers metric.Int64ObservableGauge // attributes: blacklist_reason } func initMetrics(manager *Manager) (*metrics, error) { - getPeer, err := meter.SyncInt64().Counter("peer_manager_get_peer_counter", - instrument.WithDescription("get peer counter")) + getPeer, err := meter.Int64Counter("peer_manager_get_peer_counter", + metric.WithDescription("get peer counter")) if err != nil { return nil, err } - getPeerWaitTimeHistogram, err := meter.SyncInt64().Histogram("peer_manager_get_peer_ms_time_hist", - instrument.WithDescription("get peer time histogram(ms), observed only for async get(is_instant = 
false)")) + getPeerWaitTimeHistogram, err := meter.Int64Histogram("peer_manager_get_peer_ms_time_hist", + metric.WithDescription("get peer time histogram(ms), observed only for async get(is_instant = false)")) if err != nil { return nil, err } - getPeerPoolSizeHistogram, err := meter.SyncInt64().Histogram("peer_manager_get_peer_pool_size_hist", - instrument.WithDescription("amount of available active peers in pool at time when get was called")) + getPeerPoolSizeHistogram, err := meter.Int64Histogram("peer_manager_get_peer_pool_size_hist", + metric.WithDescription("amount of available active peers in pool at time when get was called")) if err != nil { return nil, err } - doneResult, err := meter.SyncInt64().Counter("peer_manager_done_result_counter", - instrument.WithDescription("done results counter")) + doneResult, err := meter.Int64Counter("peer_manager_done_result_counter", + metric.WithDescription("done results counter")) if err != nil { return nil, err } - validationResult, err := meter.SyncInt64().Counter("peer_manager_validation_result_counter", - instrument.WithDescription("validation result counter")) + validationResult, err := meter.Int64Counter("peer_manager_validation_result_counter", + metric.WithDescription("validation result counter")) if err != nil { return nil, err } - shrexPools, err := meter.AsyncInt64().Gauge("peer_manager_pools_gauge", - instrument.WithDescription("pools amount")) + shrexPools, err := meter.Int64ObservableGauge("peer_manager_pools_gauge", + metric.WithDescription("pools amount")) if err != nil { return nil, err } - fullNodesPool, err := meter.AsyncInt64().Gauge("peer_manager_full_nodes_gauge", - instrument.WithDescription("full nodes pool peers amount")) + fullNodesPool, err := meter.Int64ObservableGauge("peer_manager_full_nodes_gauge", + metric.WithDescription("full nodes pool peers amount")) if err != nil { return nil, err } - blacklisted, err := meter.AsyncInt64().Gauge("peer_manager_blacklisted_peers", - instrument.WithDescription("blacklisted peers amount")) + blacklisted, err := meter.Int64ObservableGauge("peer_manager_blacklisted_peers", + metric.WithDescription("blacklisted peers amount")) if err != nil { return nil, err } @@ -136,33 +134,31 @@ func initMetrics(manager *Manager) (*metrics, error) { blacklistedPeers: blacklisted, } - err = meter.RegisterCallback( - []instrument.Asynchronous{ - shrexPools, - fullNodesPool, - blacklisted, - }, - func(ctx context.Context) { - for poolStatus, count := range manager.shrexPools() { - shrexPools.Observe(ctx, count, - attribute.String(poolStatusKey, string(poolStatus))) - } - - fullNodesPool.Observe(ctx, int64(manager.fullNodes.len()), - attribute.String(peerStatusKey, string(peerStatusActive))) - fullNodesPool.Observe(ctx, int64(manager.fullNodes.cooldown.len()), - attribute.String(peerStatusKey, string(peerStatusCooldown))) - - metrics.blacklistedPeersByReason.Range(func(key, value any) bool { - reason := key.(blacklistPeerReason) - amount := value.(int) - blacklisted.Observe(ctx, int64(amount), - attribute.String(blacklistPeerReasonKey, string(reason))) - return true - }) - }, - ) + callback := func(ctx context.Context, observer metric.Observer) error { + for poolStatus, count := range manager.shrexPools() { + observer.ObserveInt64(shrexPools, count, + metric.WithAttributes( + attribute.String(poolStatusKey, string(poolStatus)))) + } + observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.len()), + metric.WithAttributes( + attribute.String(peerStatusKey, string(peerStatusActive)))) + 
observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.cooldown.len()), + metric.WithAttributes( + attribute.String(peerStatusKey, string(peerStatusCooldown)))) + + metrics.blacklistedPeersByReason.Range(func(key, value any) bool { + reason := key.(blacklistPeerReason) + amount := value.(int) + observer.ObserveInt64(blacklisted, int64(amount), + metric.WithAttributes( + attribute.String(blacklistPeerReasonKey, string(reason)))) + return true + }) + return nil + } + _, err = meter.RegisterCallback(callback, shrexPools, fullNodesPool, blacklisted) if err != nil { return nil, fmt.Errorf("registering metrics callback: %w", err) } @@ -180,17 +176,20 @@ func (m *metrics) observeGetPeer( ctx = context.Background() } m.getPeer.Add(ctx, 1, - attribute.String(sourceKey, string(source)), - attribute.Bool(isInstantKey, waitTime == 0)) + metric.WithAttributes( + attribute.String(sourceKey, string(source)), + attribute.Bool(isInstantKey, waitTime == 0))) if source == sourceShrexSub { m.getPeerPoolSizeHistogram.Record(ctx, int64(poolSize), - attribute.String(sourceKey, string(source))) + metric.WithAttributes( + attribute.String(sourceKey, string(source)))) } // record wait time only for async gets if waitTime > 0 { m.getPeerWaitTimeHistogram.Record(ctx, waitTime.Milliseconds(), - attribute.String(sourceKey, string(source))) + metric.WithAttributes( + attribute.String(sourceKey, string(source)))) } } @@ -201,8 +200,9 @@ func (m *metrics) observeDoneResult(source peerSource, result result) { ctx := context.Background() m.doneResult.Add(ctx, 1, - attribute.String(sourceKey, string(source)), - attribute.String(doneResultKey, string(result))) + metric.WithAttributes( + attribute.String(sourceKey, string(source)), + attribute.String(doneResultKey, string(result)))) } // validationObserver is a middleware that observes validation results as metrics @@ -230,7 +230,8 @@ func (m *metrics) validationObserver(validator shrexsub.ValidatorFn) shrexsub.Va } m.validationResult.Add(ctx, 1, - attribute.String(validationResultKey, resStr)) + metric.WithAttributes( + attribute.String(validationResultKey, resStr))) return res } } diff --git a/share/p2p/shrexeds/exchange_test.go b/share/p2p/shrexeds/exchange_test.go index b0e11e3587..14e220a8f9 100644 --- a/share/p2p/shrexeds/exchange_test.go +++ b/share/p2p/shrexeds/exchange_test.go @@ -16,8 +16,8 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p" ) @@ -34,8 +34,9 @@ func TestExchange_RequestEDS(t *testing.T) { // Testcase: EDS is immediately available t.Run("EDS_Available", func(t *testing.T) { - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) err = store.Put(ctx, dah.Hash(), eds) require.NoError(t, err) @@ -46,20 +47,27 @@ func TestExchange_RequestEDS(t *testing.T) { // Testcase: EDS is unavailable initially, but is found after multiple requests t.Run("EDS_AvailableAfterDelay", func(t *testing.T) { - storageDelay := time.Second - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + + lock := make(chan struct{}) go func() { - time.Sleep(storageDelay) + <-lock err = store.Put(ctx, dah.Hash(), eds) - // 
require.NoError(t, err) + require.NoError(t, err) + lock <- struct{}{} }() requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) assert.ErrorIs(t, err, p2p.ErrNotFound) assert.Nil(t, requestedEDS) - time.Sleep(storageDelay * 2) + // unlock write + lock <- struct{}{} + // wait for write to finish + <-lock + requestedEDS, err = client.RequestEDS(ctx, dah.Hash(), server.host.ID()) assert.NoError(t, err) assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) @@ -76,9 +84,10 @@ func TestExchange_RequestEDS(t *testing.T) { t.Run("EDS_err_not_found", func(t *testing.T) { timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) - _, err := client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID()) + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + _, err = client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID()) require.ErrorIs(t, err, p2p.ErrNotFound) }) diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go index ab407126de..86c5150095 100644 --- a/share/p2p/shrexnd/client.go +++ b/share/p2p/shrexnd/client.go @@ -15,10 +15,8 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" ) @@ -47,16 +45,20 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { } // RequestND requests namespaced data from the given peer. -// Returns valid data with its verified inclusion against the share.Root. +// Returns NamespacedShares with unverified inclusion proofs against the share.Root. 
func (c *Client) RequestND( ctx context.Context, root *share.Root, - nID namespace.ID, + namespace share.Namespace, peer peer.ID, ) (share.NamespacedShares, error) { - shares, err := c.doRequest(ctx, root, nID, peer) + if err := namespace.ValidateForData(); err != nil { + return nil, err + } + + shares, err := c.doRequest(ctx, root, namespace, peer) if err == nil { - return shares, err + return shares, nil } if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) @@ -71,7 +73,7 @@ func (c *Client) RequestND( return nil, context.DeadlineExceeded } } - if err != p2p.ErrNotFound && err != share.ErrNamespaceNotFound { + if err != p2p.ErrNotFound && err != p2p.ErrRateLimited { log.Warnw("client-nd: peer returned err", "err", err) } return nil, err @@ -80,7 +82,7 @@ func (c *Client) RequestND( func (c *Client) doRequest( ctx context.Context, root *share.Root, - nID namespace.ID, + namespace share.Namespace, peerID peer.ID, ) (share.NamespacedShares, error) { stream, err := c.host.NewStream(ctx, peerID, c.protocolID) @@ -92,12 +94,13 @@ func (c *Client) doRequest( c.setStreamDeadlines(ctx, stream) req := &pb.GetSharesByNamespaceRequest{ - RootHash: root.Hash(), - NamespaceId: nID, + RootHash: root.Hash(), + Namespace: namespace, } _, err = serde.Write(stream, req) if err != nil { + c.metrics.ObserveRequests(ctx, 1, p2p.StatusSendReqErr) stream.Reset() //nolint:errcheck return nil, fmt.Errorf("client-nd: writing request: %w", err) } @@ -107,50 +110,70 @@ func (c *Client) doRequest( log.Debugw("client-nd: closing write side of the stream", "err", err) } - var resp pb.GetSharesByNamespaceResponse - _, err = serde.Read(stream, &resp) + if err := c.readStatus(ctx, stream); err != nil { + return nil, err + } + return c.readNamespacedShares(ctx, stream) +} + +func (c *Client) readStatus(ctx context.Context, stream network.Stream) error { + var resp pb.GetSharesByNamespaceStatusResponse + _, err := serde.Read(stream, &resp) if err != nil { // server is overloaded and closed the stream if errors.Is(err, io.EOF) { c.metrics.ObserveRequests(ctx, 1, p2p.StatusRateLimited) - return nil, p2p.ErrNotFound + return p2p.ErrRateLimited } + c.metrics.ObserveRequests(ctx, 1, p2p.StatusReadRespErr) stream.Reset() //nolint:errcheck - return nil, fmt.Errorf("client-nd: reading response: %w", err) - } - - if err = c.statusToErr(ctx, resp.Status); err != nil { - return nil, fmt.Errorf("client-nd: response code is not OK: %w", err) + return fmt.Errorf("client-nd: reading status response: %w", err) } - shares, err := convertToNamespacedShares(resp.Rows) - if err != nil { - return nil, fmt.Errorf("client-nd: converting response to shares: %w", err) - } - return shares, nil + return c.convertStatusToErr(ctx, resp.Status) } -// convertToNamespacedShares converts proto Rows to share.NamespacedShares -func convertToNamespacedShares(rows []*pb.Row) (share.NamespacedShares, error) { - shares := make([]share.NamespacedRow, 0, len(rows)) - for _, row := range rows { - var proof *nmt.Proof +// readNamespacedShares reads namespaced rows from the stream and converts them to share.NamespacedShares +func (c *Client) readNamespacedShares( + ctx context.Context, + stream network.Stream, +) (share.NamespacedShares, error) { + var shares share.NamespacedShares + for { + var row pb.NamespaceRowResponse + _, err := serde.Read(stream, &row) + if err != nil { + if errors.Is(err, io.EOF) { + // all data is received and stream is closed by server + return shares, nil + } + c.metrics.ObserveRequests(ctx, 1, 
p2p.StatusReadRespErr) + return nil, err + } + var proof nmt.Proof if row.Proof != nil { - tmpProof := nmt.NewInclusionProof( - int(row.Proof.Start), - int(row.Proof.End), - row.Proof.Nodes, - ipld.NMTIgnoreMaxNamespace, - ) - proof = &tmpProof + if len(row.Shares) != 0 { + proof = nmt.NewInclusionProof( + int(row.Proof.Start), + int(row.Proof.End), + row.Proof.Nodes, + row.Proof.IsMaxNamespaceIgnored, + ) + } else { + proof = nmt.NewAbsenceProof( + int(row.Proof.Start), + int(row.Proof.End), + row.Proof.Nodes, + row.Proof.LeafHash, + row.Proof.IsMaxNamespaceIgnored, + ) + } } - shares = append(shares, share.NamespacedRow{ Shares: row.Shares, - Proof: proof, + Proof: &proof, }) } - return shares, nil } func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) { @@ -181,18 +204,16 @@ func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) } } -func (c *Client) statusToErr(ctx context.Context, code pb.StatusCode) error { - switch code { +func (c *Client) convertStatusToErr(ctx context.Context, status pb.StatusCode) error { + switch status { case pb.StatusCode_OK: c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) return nil case pb.StatusCode_NOT_FOUND: c.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) return p2p.ErrNotFound - case pb.StatusCode_NAMESPACE_NOT_FOUND: - return share.ErrNamespaceNotFound case pb.StatusCode_INVALID: - log.Debug("client-nd: invalid request") + log.Warn("client-nd: invalid request") fallthrough case pb.StatusCode_INTERNAL: fallthrough diff --git a/share/p2p/shrexnd/exchange_test.go b/share/p2p/shrexnd/exchange_test.go index 8c5a132fdc..1ca0736cd6 100644 --- a/share/p2p/shrexnd/exchange_test.go +++ b/share/p2p/shrexnd/exchange_test.go @@ -14,13 +14,13 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" - nmtnamespace "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestExchange_RequestND_NotFound(t *testing.T) { @@ -35,8 +35,8 @@ func TestExchange_RequestND_NotFound(t *testing.T) { t.Cleanup(cancel) root := share.Root{} - nID := make([]byte, namespace.NamespaceSize) - _, err := client.RequestND(ctx, &root, nID, server.host.ID()) + namespace := sharetest.RandV0Namespace() + _, err := client.RequestND(ctx, &root, namespace, server.host.ID()) require.ErrorIs(t, err, p2p.ErrNotFound) }) @@ -44,13 +44,15 @@ func TestExchange_RequestND_NotFound(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - eds := share.RandEDS(t, 4) - dah := da.NewDataAvailabilityHeader(eds) + eds := edstest.RandEDS(t, 4) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) - randNID := dah.RowRoots[(len(dah.RowRoots)-1)/2][:namespace.NamespaceSize] - _, err := client.RequestND(ctx, &dah, randNID, server.host.ID()) - require.ErrorIs(t, err, share.ErrNamespaceNotFound) + randNamespace := dah.RowRoots[(len(dah.RowRoots)-1)/2][:share.NamespaceSize] + emptyShares, err := client.RequestND(ctx, &dah, randNamespace, server.host.ID()) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) }) } @@ -91,14 +93,14 @@ func TestExchange_RequestND(t *testing.T) 
{ // take server concurrency slots with blocked requests for i := 0; i < rateLimit; i++ { go func(i int) { - client.RequestND(ctx, nil, nil, server.host.ID()) //nolint:errcheck + client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck }(i) } // wait until all server slots are taken wg.Wait() - _, err = client.RequestND(ctx, nil, nil, server.host.ID()) - require.ErrorIs(t, err, p2p.ErrNotFound) + _, err = client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) + require.ErrorIs(t, err, p2p.ErrRateLimited) }) } @@ -117,9 +119,9 @@ func (m notFoundGetter) GetEDS( } func (m notFoundGetter) GetSharesByNamespace( - _ context.Context, _ *share.Root, _ nmtnamespace.ID, + _ context.Context, _ *share.Root, _ share.Namespace, ) (share.NamespacedShares, error) { - return nil, share.ErrNamespaceNotFound + return nil, nil } func newStore(t *testing.T) *eds.Store { diff --git a/share/p2p/shrexnd/params.go b/share/p2p/shrexnd/params.go index a645267962..8489627a07 100644 --- a/share/p2p/shrexnd/params.go +++ b/share/p2p/shrexnd/params.go @@ -8,7 +8,7 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p" ) -const protocolString = "/shrex/nd/v0.0.2" +const protocolString = "/shrex/nd/v0.0.3" var log = logging.Logger("shrex/nd") diff --git a/share/p2p/shrexnd/pb/share.pb.go b/share/p2p/shrexnd/pb/share.pb.go index d902570410..7e3c11416f 100644 --- a/share/p2p/shrexnd/pb/share.pb.go +++ b/share/p2p/shrexnd/pb/share.pb.go @@ -5,6 +5,7 @@ package share_p2p_shrex_nd import ( fmt "fmt" + pb "github.com/celestiaorg/nmt/pb" proto "github.com/gogo/protobuf/proto" io "io" math "math" @@ -25,11 +26,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type StatusCode int32 const ( - StatusCode_INVALID StatusCode = 0 - StatusCode_OK StatusCode = 1 - StatusCode_NOT_FOUND StatusCode = 2 - StatusCode_INTERNAL StatusCode = 3 - StatusCode_NAMESPACE_NOT_FOUND StatusCode = 4 + StatusCode_INVALID StatusCode = 0 + StatusCode_OK StatusCode = 1 + StatusCode_NOT_FOUND StatusCode = 2 + StatusCode_INTERNAL StatusCode = 3 ) var StatusCode_name = map[int32]string{ @@ -37,15 +37,13 @@ var StatusCode_name = map[int32]string{ 1: "OK", 2: "NOT_FOUND", 3: "INTERNAL", - 4: "NAMESPACE_NOT_FOUND", } var StatusCode_value = map[string]int32{ - "INVALID": 0, - "OK": 1, - "NOT_FOUND": 2, - "INTERNAL": 3, - "NAMESPACE_NOT_FOUND": 4, + "INVALID": 0, + "OK": 1, + "NOT_FOUND": 2, + "INTERNAL": 3, } func (x StatusCode) String() string { @@ -57,8 +55,8 @@ func (StatusCode) EnumDescriptor() ([]byte, []int) { } type GetSharesByNamespaceRequest struct { - RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - NamespaceId []byte `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` } func (m *GetSharesByNamespaceRequest) Reset() { *m = GetSharesByNamespaceRequest{} } @@ -101,30 +99,29 @@ func (m *GetSharesByNamespaceRequest) GetRootHash() []byte { return nil } -func (m *GetSharesByNamespaceRequest) GetNamespaceId() []byte { +func (m *GetSharesByNamespaceRequest) GetNamespace() []byte { if m != nil { - return m.NamespaceId + return m.Namespace } return nil } -type GetSharesByNamespaceResponse struct { +type GetSharesByNamespaceStatusResponse struct { Status StatusCode 
`protobuf:"varint,1,opt,name=status,proto3,enum=share.p2p.shrex.nd.StatusCode" json:"status,omitempty"` - Rows []*Row `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"` } -func (m *GetSharesByNamespaceResponse) Reset() { *m = GetSharesByNamespaceResponse{} } -func (m *GetSharesByNamespaceResponse) String() string { return proto.CompactTextString(m) } -func (*GetSharesByNamespaceResponse) ProtoMessage() {} -func (*GetSharesByNamespaceResponse) Descriptor() ([]byte, []int) { +func (m *GetSharesByNamespaceStatusResponse) Reset() { *m = GetSharesByNamespaceStatusResponse{} } +func (m *GetSharesByNamespaceStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetSharesByNamespaceStatusResponse) ProtoMessage() {} +func (*GetSharesByNamespaceStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ed9f13149b0de397, []int{1} } -func (m *GetSharesByNamespaceResponse) XXX_Unmarshal(b []byte) error { +func (m *GetSharesByNamespaceStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetSharesByNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetSharesByNamespaceStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetSharesByNamespaceResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetSharesByNamespaceStatusResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -134,49 +131,42 @@ func (m *GetSharesByNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) return b[:n], nil } } -func (m *GetSharesByNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSharesByNamespaceResponse.Merge(m, src) +func (m *GetSharesByNamespaceStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSharesByNamespaceStatusResponse.Merge(m, src) } -func (m *GetSharesByNamespaceResponse) XXX_Size() int { +func (m *GetSharesByNamespaceStatusResponse) XXX_Size() int { return m.Size() } -func (m *GetSharesByNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetSharesByNamespaceResponse.DiscardUnknown(m) +func (m *GetSharesByNamespaceStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSharesByNamespaceStatusResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetSharesByNamespaceResponse proto.InternalMessageInfo +var xxx_messageInfo_GetSharesByNamespaceStatusResponse proto.InternalMessageInfo -func (m *GetSharesByNamespaceResponse) GetStatus() StatusCode { +func (m *GetSharesByNamespaceStatusResponse) GetStatus() StatusCode { if m != nil { return m.Status } return StatusCode_INVALID } -func (m *GetSharesByNamespaceResponse) GetRows() []*Row { - if m != nil { - return m.Rows - } - return nil -} - -type Row struct { - Shares [][]byte `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` - Proof *Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` +type NamespaceRowResponse struct { + Shares [][]byte `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` } -func (m *Row) Reset() { *m = Row{} } -func (m *Row) String() string { return proto.CompactTextString(m) } -func (*Row) ProtoMessage() {} -func (*Row) Descriptor() ([]byte, []int) { +func (m *NamespaceRowResponse) Reset() { *m = NamespaceRowResponse{} } +func (m *NamespaceRowResponse) String() string { return proto.CompactTextString(m) } +func (*NamespaceRowResponse) 
ProtoMessage() {} +func (*NamespaceRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ed9f13149b0de397, []int{2} } -func (m *Row) XXX_Unmarshal(b []byte) error { +func (m *NamespaceRowResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NamespaceRowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Row.Marshal(b, m, deterministic) + return xxx_messageInfo_NamespaceRowResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -186,129 +176,64 @@ func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Row) XXX_Merge(src proto.Message) { - xxx_messageInfo_Row.Merge(m, src) +func (m *NamespaceRowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceRowResponse.Merge(m, src) } -func (m *Row) XXX_Size() int { +func (m *NamespaceRowResponse) XXX_Size() int { return m.Size() } -func (m *Row) XXX_DiscardUnknown() { - xxx_messageInfo_Row.DiscardUnknown(m) +func (m *NamespaceRowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceRowResponse.DiscardUnknown(m) } -var xxx_messageInfo_Row proto.InternalMessageInfo +var xxx_messageInfo_NamespaceRowResponse proto.InternalMessageInfo -func (m *Row) GetShares() [][]byte { +func (m *NamespaceRowResponse) GetShares() [][]byte { if m != nil { return m.Shares } return nil } -func (m *Row) GetProof() *Proof { +func (m *NamespaceRowResponse) GetProof() *pb.Proof { if m != nil { return m.Proof } return nil } -type Proof struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` - Nodes [][]byte `protobuf:"bytes,3,rep,name=Nodes,proto3" json:"Nodes,omitempty"` -} - -func (m *Proof) Reset() { *m = Proof{} } -func (m *Proof) String() string { return proto.CompactTextString(m) } -func (*Proof) ProtoMessage() {} -func (*Proof) Descriptor() ([]byte, []int) { - return fileDescriptor_ed9f13149b0de397, []int{3} -} -func (m *Proof) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Proof.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Proof) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proof.Merge(m, src) -} -func (m *Proof) XXX_Size() int { - return m.Size() -} -func (m *Proof) XXX_DiscardUnknown() { - xxx_messageInfo_Proof.DiscardUnknown(m) -} - -var xxx_messageInfo_Proof proto.InternalMessageInfo - -func (m *Proof) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *Proof) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *Proof) GetNodes() [][]byte { - if m != nil { - return m.Nodes - } - return nil -} - func init() { proto.RegisterEnum("share.p2p.shrex.nd.StatusCode", StatusCode_name, StatusCode_value) proto.RegisterType((*GetSharesByNamespaceRequest)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceRequest") - proto.RegisterType((*GetSharesByNamespaceResponse)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceResponse") - proto.RegisterType((*Row)(nil), "share.p2p.shrex.nd.Row") - proto.RegisterType((*Proof)(nil), "share.p2p.shrex.nd.Proof") + 
proto.RegisterType((*GetSharesByNamespaceStatusResponse)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceStatusResponse") + proto.RegisterType((*NamespaceRowResponse)(nil), "share.p2p.shrex.nd.NamespaceRowResponse") } func init() { proto.RegisterFile("share/p2p/shrexnd/pb/share.proto", fileDescriptor_ed9f13149b0de397) } var fileDescriptor_ed9f13149b0de397 = []byte{ - // 386 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0xae, 0x93, 0x40, - 0x14, 0x86, 0x81, 0xb9, 0xc5, 0x7b, 0x0f, 0x68, 0xc8, 0x68, 0xbc, 0x98, 0x6b, 0x48, 0x65, 0xd5, - 0x68, 0x02, 0x09, 0x26, 0xee, 0x69, 0x8b, 0x4a, 0xac, 0xd3, 0x66, 0x5a, 0x75, 0x65, 0x08, 0x95, - 0x31, 0xb8, 0x90, 0x19, 0x99, 0x69, 0xaa, 0x6b, 0x5f, 0xc0, 0xc7, 0x72, 0xd9, 0xa5, 0x4b, 0xd3, - 0xbe, 0x88, 0x61, 0xa8, 0x76, 0x61, 0x77, 0xfc, 0xff, 0xf9, 0xce, 0x7f, 0xce, 0x21, 0x03, 0x43, - 0x59, 0x97, 0x2d, 0x8b, 0x45, 0x22, 0x62, 0x59, 0xb7, 0xec, 0x6b, 0x53, 0xc5, 0x62, 0x1d, 0x6b, - 0x33, 0x12, 0x2d, 0x57, 0x1c, 0xe3, 0xa3, 0x48, 0x44, 0xa4, 0x89, 0xa8, 0xa9, 0xc2, 0xf7, 0x70, - 0xf3, 0x82, 0xa9, 0x65, 0x57, 0x90, 0xe3, 0x6f, 0xa4, 0xfc, 0xcc, 0xa4, 0x28, 0x3f, 0x30, 0xca, - 0xbe, 0x6c, 0x98, 0x54, 0xf8, 0x06, 0xae, 0x5a, 0xce, 0x55, 0x51, 0x97, 0xb2, 0xf6, 0xcd, 0xa1, - 0x39, 0x72, 0xe9, 0x65, 0x67, 0xbc, 0x2c, 0x65, 0x8d, 0x1f, 0x81, 0xdb, 0xfc, 0x6d, 0x28, 0x3e, - 0x55, 0xbe, 0xa5, 0xeb, 0xce, 0x3f, 0x2f, 0xaf, 0xc2, 0xef, 0x26, 0x3c, 0x3c, 0x9f, 0x2f, 0x05, - 0x6f, 0x24, 0xc3, 0xcf, 0xc0, 0x96, 0xaa, 0x54, 0x1b, 0xa9, 0xd3, 0xef, 0x24, 0x41, 0xf4, 0xff, - 0x92, 0xd1, 0x52, 0x13, 0x13, 0x5e, 0x31, 0x7a, 0xa4, 0xf1, 0x13, 0xb8, 0x68, 0xf9, 0x56, 0xfa, - 0xd6, 0x10, 0x8d, 0x9c, 0xe4, 0xfa, 0x5c, 0x17, 0xe5, 0x5b, 0xaa, 0xa1, 0x90, 0x00, 0xa2, 0x7c, - 0x8b, 0xef, 0x83, 0xad, 0xb1, 0x6e, 0x16, 0x1a, 0xb9, 0xf4, 0xa8, 0x70, 0x0c, 0x03, 0xd1, 0x72, - 0xfe, 0x51, 0x1f, 0xe0, 0x24, 0x0f, 0xce, 0x85, 0x2d, 0x3a, 0x80, 0xf6, 0x5c, 0x98, 0xc1, 0x40, - 0x6b, 0x7c, 0x0f, 0x06, 0x52, 0x95, 0xad, 0xd2, 0xcb, 0x23, 0xda, 0x0b, 0xec, 0x01, 0x62, 0x4d, - 0xff, 0x3b, 0x10, 0xed, 0x3e, 0x3b, 0x8e, 0xf0, 0x8a, 0x49, 0x1f, 0xe9, 0xc1, 0xbd, 0x78, 0xfc, - 0x0e, 0xe0, 0x74, 0x19, 0x76, 0xe0, 0x56, 0x4e, 0xde, 0xa6, 0xb3, 0x7c, 0xea, 0x19, 0xd8, 0x06, - 0x6b, 0xfe, 0xca, 0x33, 0xf1, 0x6d, 0xb8, 0x22, 0xf3, 0x55, 0xf1, 0x7c, 0xfe, 0x86, 0x4c, 0x3d, - 0x0b, 0xbb, 0x70, 0x99, 0x93, 0x55, 0x46, 0x49, 0x3a, 0xf3, 0x10, 0xbe, 0x86, 0xbb, 0x24, 0x7d, - 0x9d, 0x2d, 0x17, 0xe9, 0x24, 0x2b, 0x4e, 0xd8, 0xc5, 0xd8, 0xff, 0xb9, 0x0f, 0xcc, 0xdd, 0x3e, - 0x30, 0x7f, 0xef, 0x03, 0xf3, 0xc7, 0x21, 0x30, 0x76, 0x87, 0xc0, 0xf8, 0x75, 0x08, 0x8c, 0xb5, - 0xad, 0x5f, 0xc2, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xba, 0xc1, 0x4e, 0xec, 0x2d, 0x02, - 0x00, 0x00, + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, + 0x10, 0xc6, 0x93, 0x96, 0x37, 0x6f, 0x3b, 0xad, 0x35, 0x2c, 0x22, 0xc5, 0xca, 0x52, 0x02, 0x42, + 0xf1, 0xb0, 0x81, 0x08, 0x1e, 0x85, 0xd6, 0xfa, 0xa7, 0x58, 0x52, 0xd9, 0xb6, 0xe2, 0x41, 0x28, + 0x1b, 0xbb, 0x92, 0x8b, 0xd9, 0x35, 0xbb, 0x45, 0xfd, 0x16, 0x7e, 0x2c, 0x8f, 0x3d, 0x7a, 0x94, + 0xf6, 0x8b, 0x48, 0xb6, 0xd1, 0x1c, 0xf4, 0xb6, 0xf3, 0xcc, 0x33, 0xbf, 0x7d, 0x66, 0xa0, 0xad, + 0x62, 0x96, 0x72, 0x5f, 0x06, 0xd2, 0x57, 0x71, 0xca, 0x5f, 0x92, 0xb9, 0x2f, 0x23, 0xdf, 0x88, + 0x44, 0xa6, 0x42, 0x0b, 0x84, 0xf2, 0x22, 0x90, 0xc4, 0x38, 0x48, 0x32, 0xdf, 0x6b, 0xc8, 0xc8, + 0x97, 0xa9, 0x10, 0x0f, 0x1b, 0x8f, 
0x77, 0x0b, 0xad, 0x0b, 0xae, 0xc7, 0x99, 0x51, 0xf5, 0x5e, + 0x43, 0xf6, 0xc8, 0x95, 0x64, 0xf7, 0x9c, 0xf2, 0xa7, 0x05, 0x57, 0x1a, 0xb5, 0xa0, 0x9a, 0x0a, + 0xa1, 0x67, 0x31, 0x53, 0x71, 0xd3, 0x6e, 0xdb, 0x9d, 0x3a, 0xad, 0x64, 0xc2, 0x25, 0x53, 0x31, + 0xda, 0x87, 0x6a, 0xf2, 0x3d, 0xd0, 0x2c, 0x99, 0x66, 0x21, 0x78, 0x77, 0xe0, 0xfd, 0x45, 0x1e, + 0x6b, 0xa6, 0x17, 0x8a, 0x72, 0x25, 0x45, 0xa2, 0x38, 0x3a, 0x06, 0x47, 0x19, 0xc5, 0xd0, 0x1b, + 0x01, 0x26, 0xbf, 0x43, 0x93, 0xcd, 0xcc, 0xa9, 0x98, 0x73, 0x9a, 0xbb, 0xbd, 0x29, 0xec, 0x14, + 0x61, 0xc5, 0xf3, 0x0f, 0x6f, 0x17, 0x1c, 0x03, 0xc8, 0x78, 0xe5, 0x4e, 0x9d, 0xe6, 0x15, 0x3a, + 0x80, 0x7f, 0x66, 0x6d, 0x93, 0xb3, 0x16, 0x6c, 0x93, 0xfc, 0x08, 0x11, 0xb9, 0xce, 0x1e, 0x74, + 0xd3, 0x3d, 0x3c, 0x01, 0x28, 0x3e, 0x43, 0x35, 0xf8, 0x3f, 0x08, 0x6f, 0xba, 0xc3, 0x41, 0xdf, + 0xb5, 0x90, 0x03, 0xa5, 0xd1, 0x95, 0x6b, 0xa3, 0x2d, 0xa8, 0x86, 0xa3, 0xc9, 0xec, 0x7c, 0x34, + 0x0d, 0xfb, 0x6e, 0x09, 0xd5, 0xa1, 0x32, 0x08, 0x27, 0x67, 0x34, 0xec, 0x0e, 0xdd, 0x72, 0xaf, + 0xf9, 0xbe, 0xc2, 0xf6, 0x72, 0x85, 0xed, 0xcf, 0x15, 0xb6, 0xdf, 0xd6, 0xd8, 0x5a, 0xae, 0xb1, + 0xf5, 0xb1, 0xc6, 0x56, 0xe4, 0x98, 0x7b, 0x1f, 0x7d, 0x05, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x53, + 0xb4, 0x86, 0xb7, 0x01, 0x00, 0x00, } func (m *GetSharesByNamespaceRequest) Marshal() (dAtA []byte, err error) { @@ -331,10 +256,10 @@ func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l - if len(m.NamespaceId) > 0 { - i -= len(m.NamespaceId) - copy(dAtA[i:], m.NamespaceId) - i = encodeVarintShare(dAtA, i, uint64(len(m.NamespaceId))) + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintShare(dAtA, i, uint64(len(m.Namespace))) i-- dAtA[i] = 0x12 } @@ -348,7 +273,7 @@ func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *GetSharesByNamespaceResponse) Marshal() (dAtA []byte, err error) { +func (m *GetSharesByNamespaceStatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -358,30 +283,16 @@ func (m *GetSharesByNamespaceResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetSharesByNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetSharesByNamespaceStatusResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetSharesByNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetSharesByNamespaceStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Rows) > 0 { - for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShare(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } if m.Status != 0 { i = encodeVarintShare(dAtA, i, uint64(m.Status)) i-- @@ -390,7 +301,7 @@ func (m *GetSharesByNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *Row) Marshal() (dAtA []byte, err error) { +func (m *NamespaceRowResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -400,12 +311,12 @@ func (m *Row) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Row) MarshalTo(dAtA []byte) (int, 
error) { +func (m *NamespaceRowResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Row) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamespaceRowResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -434,48 +345,6 @@ func (m *Row) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Proof) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Proof) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Proof) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Nodes[iNdEx]) - copy(dAtA[i:], m.Nodes[iNdEx]) - i = encodeVarintShare(dAtA, i, uint64(len(m.Nodes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.End != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x10 - } - if m.Start != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func encodeVarintShare(dAtA []byte, offset int, v uint64) int { offset -= sovShare(v) base := offset @@ -497,14 +366,14 @@ func (m *GetSharesByNamespaceRequest) Size() (n int) { if l > 0 { n += 1 + l + sovShare(uint64(l)) } - l = len(m.NamespaceId) + l = len(m.Namespace) if l > 0 { n += 1 + l + sovShare(uint64(l)) } return n } -func (m *GetSharesByNamespaceResponse) Size() (n int) { +func (m *GetSharesByNamespaceStatusResponse) Size() (n int) { if m == nil { return 0 } @@ -513,16 +382,10 @@ func (m *GetSharesByNamespaceResponse) Size() (n int) { if m.Status != 0 { n += 1 + sovShare(uint64(m.Status)) } - if len(m.Rows) > 0 { - for _, e := range m.Rows { - l = e.Size() - n += 1 + l + sovShare(uint64(l)) - } - } return n } -func (m *Row) Size() (n int) { +func (m *NamespaceRowResponse) Size() (n int) { if m == nil { return 0 } @@ -541,27 +404,6 @@ func (m *Row) Size() (n int) { return n } -func (m *Proof) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovShare(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovShare(uint64(m.End)) - } - if len(m.Nodes) > 0 { - for _, b := range m.Nodes { - l = len(b) - n += 1 + l + sovShare(uint64(l)) - } - } - return n -} - func sovShare(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -633,7 +475,7 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -660,9 +502,9 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) - if m.NamespaceId == nil { - m.NamespaceId = []byte{} + m.Namespace = append(m.Namespace[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Namespace == nil { + m.Namespace = []byte{} } iNdEx = postIndex default: @@ -686,7 +528,7 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetSharesByNamespaceResponse) Unmarshal(dAtA []byte) error { +func (m *GetSharesByNamespaceStatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -709,10 +551,10 @@ func (m *GetSharesByNamespaceResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSharesByNamespaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSharesByNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -734,40 +576,6 @@ func (m *GetSharesByNamespaceResponse) Unmarshal(dAtA []byte) error { break } } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rows = append(m.Rows, &Row{}) - if err := m.Rows[len(m.Rows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipShare(dAtA[iNdEx:]) @@ -789,7 +597,7 @@ func (m *GetSharesByNamespaceResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *Row) Unmarshal(dAtA []byte) error { +func (m *NamespaceRowResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -812,10 +620,10 @@ func (m *Row) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Row: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceRowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Row: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceRowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -880,7 +688,7 @@ func (m *Row) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Proof == nil { - m.Proof = &Proof{} + m.Proof = &pb.Proof{} } if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -907,126 +715,6 @@ func (m *Row) Unmarshal(dAtA []byte) error { } return nil } -func (m *Proof) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Proof: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
Proof: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) - copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipShare(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/share/p2p/shrexnd/pb/share.proto b/share/p2p/shrexnd/pb/share.proto index 3d6a896641..a5bdbfa071 100644 --- a/share/p2p/shrexnd/pb/share.proto +++ b/share/p2p/shrexnd/pb/share.proto @@ -1,15 +1,15 @@ syntax = "proto3"; package share.p2p.shrex.nd; +import "pb/proof.proto"; message GetSharesByNamespaceRequest{ bytes root_hash = 1; - bytes namespace_id = 2; + bytes namespace = 2; } -message GetSharesByNamespaceResponse{ +message GetSharesByNamespaceStatusResponse{ StatusCode status = 1; - repeated Row rows = 2; } enum StatusCode { @@ -17,16 +17,9 @@ enum StatusCode { OK = 1; NOT_FOUND = 2; INTERNAL = 3; - NAMESPACE_NOT_FOUND = 4; }; -message Row { +message NamespaceRowResponse { repeated bytes shares = 1; - Proof proof = 2; -} - -message Proof { - int64 start = 1; - int64 end = 2; - repeated bytes Nodes = 3; + proof.pb.Proof proof = 2; } diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go index 67f64b8393..153bbbb1ba 100644 --- a/share/p2p/shrexnd/server.go +++ b/share/p2p/shrexnd/server.go @@ -2,7 +2,7 @@ package shrexnd import ( "context" - "encoding/hex" + "crypto/sha256" "errors" "fmt" "time" @@ -10,14 +10,13 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/minio/sha256-simd" "go.uber.org/zap" "github.com/celestiaorg/go-libp2p-messenger/serde" + nmt_pb "github.com/celestiaorg/nmt/pb" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p" pb 
"github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" ) @@ -30,8 +29,9 @@ type Server struct { host host.Host protocolID protocol.ID - getter share.Getter - store *eds.Store + handler network.StreamHandler + getter share.Getter + store *eds.Store params *Parameters middleware *p2p.Middleware @@ -53,18 +53,16 @@ func NewServer(params *Parameters, host host.Host, store *eds.Store, getter shar middleware: p2p.NewMiddleware(params.ConcurrencyLimit), } + ctx, cancel := context.WithCancel(context.Background()) + srv.cancel = cancel + + srv.handler = srv.middleware.RateLimitHandler(srv.streamHandler(ctx)) return srv, nil } // Start starts the server func (srv *Server) Start(context.Context) error { - ctx, cancel := context.WithCancel(context.Background()) - srv.cancel = cancel - - handler := func(s network.Stream) { - srv.handleNamespacedData(ctx, s) - } - srv.host.SetStreamHandler(srv.protocolID, srv.middleware.RateLimitHandler(handler)) + srv.host.SetStreamHandler(srv.protocolID, srv.handler) return nil } @@ -75,6 +73,24 @@ func (srv *Server) Stop(context.Context) error { return nil } +func (srv *Server) streamHandler(ctx context.Context) network.StreamHandler { + return func(s network.Stream) { + err := srv.handleNamespacedData(ctx, s) + if err != nil { + s.Reset() //nolint:errcheck + return + } + if err = s.Close(); err != nil { + log.Debugw("server: closing stream", "err", err) + } + } +} + +// SetHandler sets server handler +func (srv *Server) SetHandler(handler network.StreamHandler) { + srv.handler = handler +} + func (srv *Server) observeRateLimitedRequests() { numRateLimited := srv.middleware.DrainCounter() if numRateLimited > 0 { @@ -82,159 +98,159 @@ func (srv *Server) observeRateLimitedRequests() { } } -func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stream) { - logger := log.With("peer", stream.Conn().RemotePeer().String()) - logger.Debug("server: handling nd request") +func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stream) error { + logger := log.With("source", "server", "peer", stream.Conn().RemotePeer().String()) + logger.Debug("handling nd request") srv.observeRateLimitedRequests() + req, err := srv.readRequest(logger, stream) + if err != nil { + logger.Warnw("read request", "err", err) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusBadRequest) + return err + } + + logger = logger.With("namespace", share.Namespace(req.Namespace).String(), + "hash", share.DataHash(req.RootHash).String()) + + ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) + defer cancel() + + shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace) + if err != nil { + // server should respond with status regardless if there was an error getting data + sendErr := srv.respondStatus(ctx, logger, stream, status) + if sendErr != nil { + logger.Errorw("sending response", "err", sendErr) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) + } + logger.Errorw("handling request", "err", err) + return errors.Join(err, sendErr) + } + + err = srv.respondStatus(ctx, logger, stream, status) + if err != nil { + logger.Errorw("sending response", "err", err) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) + return err + } + + err = srv.sendNamespacedShares(shares, stream) + if err != nil { + logger.Errorw("send nd data", "err", err) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) + return err + } + return nil +} +func (srv *Server) readRequest( + logger *zap.SugaredLogger, + stream 
network.Stream, +) (*pb.GetSharesByNamespaceRequest, error) { err := stream.SetReadDeadline(time.Now().Add(srv.params.ServerReadTimeout)) if err != nil { - logger.Debugw("server: setting read deadline", "err", err) + logger.Debugw("setting read deadline", "err", err) } var req pb.GetSharesByNamespaceRequest _, err = serde.Read(stream, &req) if err != nil { - logger.Warnw("server: reading request", "err", err) - stream.Reset() //nolint:errcheck - return + return nil, fmt.Errorf("reading request: %w", err) + } - logger = logger.With("namespaceId", hex.EncodeToString(req.NamespaceId), "hash", share.DataHash(req.RootHash).String()) - logger.Debugw("server: new request") + logger.Debugw("new request") err = stream.CloseRead() if err != nil { - logger.Debugw("server: closing read side of the stream", "err", err) + logger.Debugw("closing read side of the stream", "err", err) } err = validateRequest(req) if err != nil { - logger.Warnw("server: invalid request", "err", err) - stream.Reset() //nolint:errcheck - return + return nil, fmt.Errorf("invalid request: %w", err) } + return &req, nil +} - ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) - defer cancel() - - dah, err := srv.store.GetDAH(ctx, req.RootHash) +func (srv *Server) getNamespaceData(ctx context.Context, + hash share.DataHash, namespace share.Namespace) (share.NamespacedShares, pb.StatusCode, error) { + dah, err := srv.store.GetDAH(ctx, hash) if err != nil { if errors.Is(err, eds.ErrNotFound) { - logger.Warn("server: DAH not found") - srv.respondNotFoundError(ctx, logger, stream) - return + return nil, pb.StatusCode_NOT_FOUND, nil } - logger.Errorw("server: retrieving DAH", "err", err) - srv.respondInternalError(ctx, logger, stream) - return + return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving DAH: %w", err) } - shares, err := srv.getter.GetSharesByNamespace(ctx, dah, req.NamespaceId) - switch { - case errors.Is(err, share.ErrNotFound): - logger.Warn("server: nd not found") - srv.respondNotFoundError(ctx, logger, stream) - return - case errors.Is(err, share.ErrNamespaceNotFound): - srv.respondNamespaceNotFoundError(ctx, logger, stream) - return - case err != nil: - logger.Errorw("server: retrieving shares", "err", err) - srv.respondInternalError(ctx, logger, stream) - return - } - - resp := namespacedSharesToResponse(shares) - srv.respond(ctx, logger, stream, resp) -} - -// validateRequest checks correctness of the request -func validateRequest(req pb.GetSharesByNamespaceRequest) error { - if len(req.NamespaceId) != ipld.NamespaceSize { - return fmt.Errorf("incorrect namespace id length: %v", len(req.NamespaceId)) - } - if len(req.RootHash) != sha256.Size { - return fmt.Errorf("incorrect root hash length: %v", len(req.RootHash)) + shares, err := srv.getter.GetSharesByNamespace(ctx, dah, namespace) + if err != nil { + return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving shares: %w", err) } - return nil + return shares, pb.StatusCode_OK, nil } -// respondNotFoundError sends a not found response to client -func (srv *Server) respondNotFoundError(ctx context.Context, - logger *zap.SugaredLogger, stream network.Stream) { - resp := &pb.GetSharesByNamespaceResponse{ - Status: pb.StatusCode_NOT_FOUND, - } - srv.respond(ctx, logger, stream, resp) -} +func (srv *Server) respondStatus( + ctx context.Context, + logger *zap.SugaredLogger, + stream network.Stream, + status pb.StatusCode, +) error { + srv.observeStatus(ctx, status) -// respondNamespaceNotFoundError sends a namespace not found response to client 
-func (srv *Server) respondNamespaceNotFoundError(ctx context.Context, - logger *zap.SugaredLogger, stream network.Stream) { - resp := &pb.GetSharesByNamespaceResponse{ - Status: pb.StatusCode_NAMESPACE_NOT_FOUND, + err := stream.SetWriteDeadline(time.Now().Add(srv.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("setting write deadline", "err", err) } - srv.respond(ctx, logger, stream, resp) -} -// respondInternalError sends internal error response to client -func (srv *Server) respondInternalError(ctx context.Context, - logger *zap.SugaredLogger, stream network.Stream) { - resp := &pb.GetSharesByNamespaceResponse{ - Status: pb.StatusCode_INTERNAL, + _, err = serde.Write(stream, &pb.GetSharesByNamespaceStatusResponse{Status: status}) + if err != nil { + return fmt.Errorf("writing response: %w", err) } - srv.respond(ctx, logger, stream, resp) + + return nil } -// namespacedSharesToResponse encodes shares into proto and sends it to client with OK status code -func namespacedSharesToResponse(shares share.NamespacedShares) *pb.GetSharesByNamespaceResponse { - rows := make([]*pb.Row, 0, len(shares)) +// sendNamespacedShares encodes shares into proto messages and sends it to client +func (srv *Server) sendNamespacedShares(shares share.NamespacedShares, stream network.Stream) error { for _, row := range shares { - proof := &pb.Proof{ - Start: int64(row.Proof.Start()), - End: int64(row.Proof.End()), - Nodes: row.Proof.Nodes(), - } - - row := &pb.Row{ + row := &pb.NamespaceRowResponse{ Shares: row.Shares, - Proof: proof, + Proof: &nmt_pb.Proof{ + Start: int64(row.Proof.Start()), + End: int64(row.Proof.End()), + Nodes: row.Proof.Nodes(), + LeafHash: row.Proof.LeafHash(), + IsMaxNamespaceIgnored: row.Proof.IsMaxNamespaceIDIgnored(), + }, + } + _, err := serde.Write(stream, row) + if err != nil { + return fmt.Errorf("writing nd data to stream: %w", err) } - - rows = append(rows, row) - } - - return &pb.GetSharesByNamespaceResponse{ - Status: pb.StatusCode_OK, - Rows: rows, } + return nil } -func (srv *Server) respond(ctx context.Context, - logger *zap.SugaredLogger, stream network.Stream, resp *pb.GetSharesByNamespaceResponse) { - err := stream.SetWriteDeadline(time.Now().Add(srv.params.ServerWriteTimeout)) - if err != nil { - logger.Debugw("server: setting write deadline", "err", err) - } - - _, err = serde.Write(stream, resp) - if err != nil { - logger.Warnw("server: writing response", "err", err) - stream.Reset() //nolint:errcheck - return - } - +func (srv *Server) observeStatus(ctx context.Context, status pb.StatusCode) { switch { - case resp.Status == pb.StatusCode_OK: + case status == pb.StatusCode_OK: srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) - case resp.Status == pb.StatusCode_NOT_FOUND: + case status == pb.StatusCode_NOT_FOUND: srv.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) - case resp.Status == pb.StatusCode_INTERNAL: + case status == pb.StatusCode_INTERNAL: srv.metrics.ObserveRequests(ctx, 1, p2p.StatusInternalErr) } - if err = stream.Close(); err != nil { - logger.Debugw("server: closing stream", "err", err) +} + +// validateRequest checks correctness of the request +func validateRequest(req pb.GetSharesByNamespaceRequest) error { + if err := share.Namespace(req.Namespace).ValidateForData(); err != nil { + return err } + if len(req.RootHash) != sha256.Size { + return fmt.Errorf("incorrect root hash length: %v", len(req.RootHash)) + } + return nil } diff --git a/share/share.go b/share/share.go index 0178054a9f..02ccd73909 100644 --- a/share/share.go +++ 
b/share/share.go @@ -4,32 +4,23 @@ import ( "bytes" "fmt" - "go.opentelemetry.io/otel" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/nmt/namespace" - - "github.com/celestiaorg/celestia-node/share/ipld" ) var ( - tracer = otel.Tracer("share") - // DefaultRSMT2DCodec sets the default rsmt2d.Codec for shares. DefaultRSMT2DCodec = appconsts.DefaultCodec ) const ( - // NamespaceSize is a system-wide size for NMT namespaces. - NamespaceSize = appconsts.NamespaceSize - - // Size is a system-wide size of a share, including both data and namespace ID + // Size is a system-wide size of a share, including both data and namespace Size = appconsts.ShareSize ) var ( // MaxSquareSize is currently the maximum size supported for unerasured data in // rsmt2d.ExtendedDataSquare. - MaxSquareSize = ipld.MaxSquareSize + MaxSquareSize = appconsts.SquareSizeUpperBound(appconsts.LatestVersion) ) // Share contains the raw share data without the corresponding namespace. @@ -38,13 +29,13 @@ var ( // on it. type Share = []byte -// ID gets the namespace ID from the share. -func ID(s Share) namespace.ID { +// GetNamespace slices Namespace out of the Share. +func GetNamespace(s Share) Namespace { return s[:NamespaceSize] } -// Data gets data from the share. -func Data(s Share) []byte { +// GetData slices out data of the Share. +func GetData(s Share) []byte { return s[NamespaceSize:] } diff --git a/share/sharetest/testing.go b/share/sharetest/testing.go new file mode 100644 index 0000000000..3889260393 --- /dev/null +++ b/share/sharetest/testing.go @@ -0,0 +1,78 @@ +package sharetest + +import ( + "bytes" + "math/rand" + "sort" + "sync" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/namespace" + + "github.com/celestiaorg/celestia-node/share" +) + +// RandShares generates 'total' amount of shares filled with random data. It uses require.TestingT +// to be able to take both a *testing.T and a *testing.B. +func RandShares(t require.TestingT, total int) []share.Share { + if total&(total-1) != 0 { + t.Errorf("total must be power of 2: %d", total) + t.FailNow() + } + + shares := make([]share.Share, total) + for i := range shares { + shr := make([]byte, share.Size) + copy(share.GetNamespace(shr), RandV0Namespace()) + rndMu.Lock() + _, err := rnd.Read(share.GetData(shr)) + rndMu.Unlock() + require.NoError(t, err) + shares[i] = shr + } + sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 }) + + return shares +} + +// RandSharesWithNamespace is the same as RandShares, but sets the same namespace for all shares. +func RandSharesWithNamespace(t require.TestingT, namespace share.Namespace, total int) []share.Share { + if total&(total-1) != 0 { + t.Errorf("total must be power of 2: %d", total) + t.FailNow() + } + + shares := make([]share.Share, total) + rnd := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec + for i := range shares { + shr := make([]byte, share.Size) + copy(share.GetNamespace(shr), namespace) + _, err := rnd.Read(share.GetData(shr)) + require.NoError(t, err) + shares[i] = shr + } + sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 }) + return shares +} + +// RandV0Namespace generates random valid data namespace for testing purposes. 
+// RandV0Namespace generates a random valid data namespace for testing purposes. +func RandV0Namespace() share.Namespace { + rb := make([]byte, namespace.NamespaceVersionZeroIDSize) + for { + rndMu.Lock() + rnd.Read(rb) + rndMu.Unlock() + namespace, _ := share.NewBlobNamespaceV0(rb) + if err := namespace.ValidateForData(); err != nil { + continue + } + return namespace + } +} + +var ( + rnd = rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec + rndMu sync.Mutex +) diff --git a/share/test_helpers.go b/share/test_helpers.go deleted file mode 100644 index c02bfc55ac..0000000000 --- a/share/test_helpers.go +++ /dev/null @@ -1,65 +0,0 @@ -package share - -import ( - "bytes" - "crypto/rand" - "sort" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/rsmt2d" -) - -// EqualEDS check whether two given EDSes are equal. -// TODO(Wondertan): Move to rsmt2d -// TODO(Wondertan): Propose use of int by default instead of uint for the sake convenience and -// Golang practices -func EqualEDS(a *rsmt2d.ExtendedDataSquare, b *rsmt2d.ExtendedDataSquare) bool { - if a.Width() != b.Width() { - return false - } - - for i := uint(0); i < a.Width(); i++ { - ar, br := a.Row(i), b.Row(i) - for j := 0; j < len(ar); j++ { - if !bytes.Equal(ar[j], br[j]) { - return false - } - } - } - - return true -} - -// RandEDS generates EDS filled with the random data with the given size for original square. It -// uses require.TestingT to be able to take both a *testing.T and a *testing.B. -func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { - shares := RandShares(t, size*size) - // recompute the eds - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) - require.NoError(t, err, "failure to recompute the extended data square") - return eds -} - -// RandShares generate 'total' amount of shares filled with random data. It uses require.TestingT -// to be able to take both a *testing.T and a *testing.B.
-func RandShares(t require.TestingT, total int) []Share { - if total&(total-1) != 0 { - t.Errorf("total must be power of 2: %d", total) - t.FailNow() - } - - shares := make([]Share, total) - for i := range shares { - share := make([]byte, Size) - copy(share[:NamespaceSize], namespace.RandomNamespace().Bytes()) - _, err := rand.Read(share[NamespaceSize:]) - require.NoError(t, err) - shares[i] = share - } - sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 }) - - return shares -} diff --git a/state/core_access.go b/state/core_access.go index 7b59f3e714..2a49e70a03 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -4,13 +4,17 @@ import ( "context" "errors" "fmt" + "math" + "sync" "time" sdkErrors "cosmossdk.io/errors" "github.com/cosmos/cosmos-sdk/api/tendermint/abci" + nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdktypes "github.com/cosmos/cosmos-sdk/types" sdktx "github.com/cosmos/cosmos-sdk/types/tx" + auth "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" logging "github.com/ipfs/go-log/v2" @@ -21,6 +25,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "github.com/celestiaorg/celestia-app/app" + apperrors "github.com/celestiaorg/celestia-app/app/errors" + "github.com/celestiaorg/celestia-app/pkg/appconsts" appblob "github.com/celestiaorg/celestia-app/x/blob" apptypes "github.com/celestiaorg/celestia-app/x/blob/types" libhead "github.com/celestiaorg/go-header" @@ -34,6 +40,8 @@ var ( ErrInvalidAmount = errors.New("state: amount must be greater than zero") ) +const maxRetries = 5 + // CoreAccessor implements service over a gRPC connection // with a celestia-core node. type CoreAccessor struct { @@ -54,8 +62,15 @@ type CoreAccessor struct { rpcPort string grpcPort string + // these fields are mutable and thus need to be protected by a mutex + lock sync.Mutex lastPayForBlob int64 payForBlobCount int64 + // minGasPrice is the minimum gas price that the node will accept. + // NOTE: just because the first node accepts the transaction, it does not mean that + // a proposer will also accept it. Better would be + // to set a global min gas price that correct processes conform to. + minGasPrice float64 } // NewCoreAccessor dials the given celestia-core endpoint and @@ -90,7 +105,11 @@ func (ca *CoreAccessor) Start(ctx context.Context) error { // dial given celestia-core endpoint endpoint := fmt.Sprintf("%s:%s", ca.coreIP, ca.grpcPort) - client, err := grpc.DialContext(ctx, endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + client, err := grpc.DialContext( + ctx, + endpoint, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) if err != nil { return err } @@ -108,6 +127,11 @@ func (ca *CoreAccessor) Start(ctx context.Context) error { } ca.rpcCli = cli + ca.minGasPrice, err = ca.queryMinimumGasPrice(ctx) + if err != nil { + return fmt.Errorf("querying minimum gas price: %w", err) + } + return nil } @@ -156,6 +180,10 @@ func (ca *CoreAccessor) constructSignedTx( return ca.signer.EncodeTx(tx) } +// SubmitPayForBlob builds, signs, and synchronously submits a MsgPayForBlob. It blocks until the +// transaction is committed and returns the TxResponse. If gasLim is set to 0, the method will +// automatically estimate the gas limit.
If the fee is negative, the method will use the nodes min +// gas price multiplied by the gas limit. func (ca *CoreAccessor) SubmitPayForBlob( ctx context.Context, fee Int, @@ -167,28 +195,74 @@ func (ca *CoreAccessor) SubmitPayForBlob( } appblobs := make([]*apptypes.Blob, len(blobs)) - for i, blob := range blobs { - appblobs[i] = &blob.Blob - } - - response, err := appblob.SubmitPayForBlob( - ctx, - ca.signer, - ca.coreConn, - appblobs, - apptypes.SetGasLimit(gasLim), - withFee(fee), - ) - // metrics should only be counted on a successful PFD tx - if err == nil && response.Code == 0 { - ca.lastPayForBlob = time.Now().UnixMilli() - ca.payForBlobCount++ - } - - if response != nil && response.Code != 0 { - err = errors.Join(err, sdkErrors.ABCIError(response.Codespace, response.Code, response.Logs.String())) - } - return response, err + for i := range blobs { + if err := blobs[i].Namespace().ValidateForBlob(); err != nil { + return nil, err + } + appblobs[i] = &blobs[i].Blob + } + + // we only estimate gas if the user wants us to (by setting the gasLim to 0). In the future we may + // want to make these arguments optional. + if gasLim == 0 { + blobSizes := make([]uint32, len(blobs)) + for i, blob := range blobs { + blobSizes[i] = uint32(len(blob.Data)) + } + + // TODO (@cmwaters): the default gas per byte and the default tx size cost per byte could be changed + // through governance. This section could be more robust by tracking these values and adjusting the + // gas limit accordingly (as is done for the gas price) + gasLim = apptypes.EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte) + } + + minGasPrice := ca.getMinGasPrice() + + // set the fee for the user as the minimum gas price multiplied by the gas limit + estimatedFee := false + if fee.IsNegative() { + estimatedFee = true + fee = sdktypes.NewInt(int64(math.Ceil(minGasPrice * float64(gasLim)))) + } + + var lastErr error + for attempt := 0; attempt < maxRetries; attempt++ { + response, err := appblob.SubmitPayForBlob( + ctx, + ca.signer, + ca.coreConn, + sdktx.BroadcastMode_BROADCAST_MODE_BLOCK, + appblobs, + apptypes.SetGasLimit(gasLim), + withFee(fee), + ) + + // the node is capable of changing the min gas price at any time so we must be able to detect it and + // update our version accordingly + if apperrors.IsInsufficientMinGasPrice(err) && estimatedFee { + // The error message contains enough information to parse the new min gas price + minGasPrice, err = apperrors.ParseInsufficientMinGasPrice(err, minGasPrice, gasLim) + if err != nil { + return nil, fmt.Errorf("parsing insufficient min gas price error: %w", err) + } + ca.setMinGasPrice(minGasPrice) + lastErr = err + // update the fee to retry again + fee = sdktypes.NewInt(int64(math.Ceil(minGasPrice * float64(gasLim)))) + continue + } + + // metrics should only be counted on a successful PFD tx + if err == nil && response.Code == 0 { + ca.markSuccessfulPFB() + } + + if response != nil && response.Code != 0 { + err = errors.Join(err, sdkErrors.ABCIError(response.Codespace, response.Code, response.Logs.String())) + } + return response, err + } + return nil, fmt.Errorf("failed to submit blobs after %d attempts: %w", maxRetries, lastErr) } func (ca *CoreAccessor) AccountAddress(context.Context) (Address, error) { @@ -221,7 +295,7 @@ func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*B abciReq := abci.RequestQuery{ // TODO @renayay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use const instead Path: 
fmt.Sprintf("store/%s/key", banktypes.StoreKey), - Height: head.Height() - 1, + Height: int64(head.Height() - 1), Data: prefixedAccountKey, Prove: true, } @@ -452,6 +526,53 @@ func (ca *CoreAccessor) QueryRedelegations( }) } +func (ca *CoreAccessor) LastPayForBlob() int64 { + ca.lock.Lock() + defer ca.lock.Unlock() + return ca.lastPayForBlob +} + +func (ca *CoreAccessor) PayForBlobCount() int64 { + ca.lock.Lock() + defer ca.lock.Unlock() + return ca.payForBlobCount +} + +func (ca *CoreAccessor) markSuccessfulPFB() { + ca.lock.Lock() + defer ca.lock.Unlock() + ca.lastPayForBlob = time.Now().UnixMilli() + ca.payForBlobCount++ +} + +func (ca *CoreAccessor) setMinGasPrice(minGasPrice float64) { + ca.lock.Lock() + defer ca.lock.Unlock() + ca.minGasPrice = minGasPrice +} + +func (ca *CoreAccessor) getMinGasPrice() float64 { + ca.lock.Lock() + defer ca.lock.Unlock() + return ca.minGasPrice +} + +// QueryMinimumGasPrice returns the minimum gas price required by the node. +func (ca *CoreAccessor) queryMinimumGasPrice( + ctx context.Context, +) (float64, error) { + rsp, err := nodeservice.NewServiceClient(ca.coreConn).Config(ctx, &nodeservice.ConfigRequest{}) + if err != nil { + return 0, err + } + + coins, err := sdktypes.ParseDecCoins(rsp.MinimumGasPrice) + if err != nil { + return 0, err + } + return coins.AmountOf(app.BondDenom).MustFloat64(), nil +} + func (ca *CoreAccessor) IsStopped(context.Context) bool { return ca.ctx.Err() != nil } diff --git a/state/core_access_test.go b/state/core_access_test.go index 5018cee4f2..69e9f251c0 100644 --- a/state/core_access_test.go +++ b/state/core_access_test.go @@ -2,32 +2,95 @@ package state import ( "context" + "errors" + "fmt" + "strings" "testing" + "time" + "cosmossdk.io/math" + sdktypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/test/util/testnode" + blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" ) -func TestLifecycle(t *testing.T) { - ca := NewCoreAccessor(nil, nil, "", "", "") +func TestSubmitPayForBlob(t *testing.T) { + accounts := []string{"jimy", "rob"} + tmCfg := testnode.DefaultTendermintConfig() + tmCfg.Consensus.TimeoutCommit = time.Millisecond * 1 + appConf := testnode.DefaultAppConfig() + appConf.API.Enable = true + appConf.MinGasPrices = fmt.Sprintf("0.1%s", app.BondDenom) + + config := testnode.DefaultConfig().WithTendermintConfig(tmCfg).WithAppConfig(appConf).WithAccounts(accounts) + cctx, rpcAddr, grpcAddr := testnode.NewNetwork(t, config) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + signer := blobtypes.NewKeyringSigner(cctx.Keyring, accounts[0], cctx.ChainID) + ca := NewCoreAccessor(signer, nil, "127.0.0.1", extractPort(rpcAddr), extractPort(grpcAddr)) // start the accessor err := ca.Start(ctx) require.NoError(t, err) - // ensure accessor isn't stopped - require.False(t, ca.IsStopped(ctx)) - // cancel the top level context (this should not affect the lifecycle of the - // accessor as it should manage its own internal context) - cancel() - // ensure accessor was unaffected by top-level context cancellation - require.False(t, ca.IsStopped(ctx)) - // stop the accessor - stopCtx, stopCancel := context.WithCancel(context.Background()) - t.Cleanup(stopCancel) - err = ca.Stop(stopCtx) + t.Cleanup(func() { + _ = ca.Stop(ctx) + }) + + 
ns, err := share.NewBlobNamespaceV0([]byte("namespace")) + require.NoError(t, err) + blobbyTheBlob, err := blob.NewBlobV0(ns, []byte("data")) require.NoError(t, err) - // ensure accessor is stopped - require.True(t, ca.IsStopped(ctx)) - // ensure that stopping the accessor again does not return an error - err = ca.Stop(stopCtx) + + minGas, err := ca.queryMinimumGasPrice(ctx) require.NoError(t, err) + require.Equal(t, appconsts.DefaultMinGasPrice, minGas) + + testcases := []struct { + name string + blobs []*blob.Blob + fee math.Int + gasLim uint64 + expErr error + }{ + { + name: "empty blobs", + blobs: []*blob.Blob{}, + fee: sdktypes.ZeroInt(), + gasLim: 0, + expErr: errors.New("state: no blobs provided"), + }, + { + name: "good blob with user provided gas and fees", + blobs: []*blob.Blob{blobbyTheBlob}, + fee: sdktypes.NewInt(10_000), // roughly 0.12 utia per gas (should be good) + gasLim: blobtypes.DefaultEstimateGas([]uint32{uint32(len(blobbyTheBlob.Data))}), + expErr: nil, + }, + // TODO: add more test cases. The problem right now is that the celestia-app doesn't + // correctly construct the node (doesn't pass the min gas price) hence the price on + // everything is zero and we can't actually test the correct behavior + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + resp, err := ca.SubmitPayForBlob(ctx, tc.fee, tc.gasLim, tc.blobs) + require.Equal(t, tc.expErr, err) + if err == nil { + require.EqualValues(t, 0, resp.Code) + } + }) + } + +} + +func extractPort(addr string) string { + splitStr := strings.Split(addr, ":") + return splitStr[len(splitStr)-1] } diff --git a/state/integration_test.go b/state/integration_test.go index 8862de1bf8..193e7bddc7 100644 --- a/state/integration_test.go +++ b/state/integration_test.go @@ -20,6 +20,7 @@ import ( "github.com/celestiaorg/celestia-app/test/util/testfactory" "github.com/celestiaorg/celestia-app/test/util/testnode" blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" + libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/core" "github.com/celestiaorg/celestia-node/header" @@ -95,7 +96,10 @@ type localHeader struct { client rpcclient.Client } -func (l localHeader) Head(ctx context.Context) (*header.ExtendedHeader, error) { +func (l localHeader) Head( + ctx context.Context, + _ ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { latest, err := l.client.Block(ctx, nil) if err != nil { return nil, err diff --git a/state/metrics.go b/state/metrics.go index e465e2833d..aa166e901d 100644 --- a/state/metrics.go +++ b/state/metrics.go @@ -3,32 +3,28 @@ package state import ( "context" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" ) -var meter = global.MeterProvider().Meter("state") +var meter = otel.Meter("state") func WithMetrics(ca *CoreAccessor) { - pfbCounter, _ := meter.AsyncInt64().Counter( + pfbCounter, _ := meter.Int64ObservableCounter( "pfb_count", - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription("Total count of submitted PayForBlob transactions"), + metric.WithDescription("Total count of submitted PayForBlob transactions"), ) - lastPfbTimestamp, _ := meter.AsyncInt64().Counter( + lastPfbTimestamp, _ := meter.Int64ObservableCounter( "last_pfb_timestamp", - instrument.WithUnit(unit.Milliseconds), - instrument.WithDescription("Timestamp of the last submitted PayForBlob 
transaction"), + metric.WithDescription("Timestamp of the last submitted PayForBlob transaction"), ) - err := meter.RegisterCallback( - []instrument.Asynchronous{pfbCounter, lastPfbTimestamp}, - func(ctx context.Context) { - pfbCounter.Observe(ctx, ca.payForBlobCount) - lastPfbTimestamp.Observe(ctx, ca.lastPayForBlob) - }, - ) + callback := func(ctx context.Context, observer metric.Observer) error { + observer.ObserveInt64(pfbCounter, ca.PayForBlobCount()) + observer.ObserveInt64(lastPfbTimestamp, ca.LastPayForBlob()) + return nil + } + _, err := meter.RegisterCallback(callback, pfbCounter, lastPfbTimestamp) if err != nil { panic(err) }