diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index 3dbf21bf..b3faf2c4 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -30,11 +30,15 @@ jobs: - name: Build binaries run: | + # Ensure module metadata is up to date + go mod tidy # Build supernode CGO_ENABLED=1 go build -trimpath -o /tmp/supernode ./supernode # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 go build -trimpath -o /tmp/sn-manager . echo "✅ Build successful" @@ -86,6 +90,8 @@ jobs: DD_API_KEY: ${{ secrets.DD_API_KEY }} DD_SITE: ${{ secrets.DD_SITE }} run: | + # Ensure module metadata is up to date + go mod tidy mkdir -p release # Build supernode @@ -105,6 +111,8 @@ jobs: # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bad0f6ee..35081f81 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,6 +17,8 @@ jobs: uses: actions/checkout@v4 - name: Setup Go and system deps uses: ./.github/actions/setup-env + - name: Go mod tidy + run: go mod tidy - name: Run unit tests run: go test $(go list ./... 
| grep -v '/tests') -v @@ -75,4 +77,4 @@ jobs: # run: make setup-supernodes # - name: Run sn-manager e2e tests - # run: make test-sn-manager \ No newline at end of file + # run: make test-sn-manager diff --git a/.gitignore b/.gitignore index 685adc58..dcce8ceb 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ *.so *.dylib *.idea/ - +*.zip # Test binary, built with `go test -c` *.test diff --git a/Makefile b/Makefile index fd9dfebf..ad855f4d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ -.PHONY: build build-release build-sncli build-sn-manager +.PHONY: build build-sncli build-sn-manager .PHONY: install-lumera setup-supernodes system-test-setup install-deps .PHONY: gen-cascade gen-supernode .PHONY: test-e2e test-unit test-integration test-system +.PHONY: release # Build variables VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev") @@ -9,9 +10,11 @@ GIT_COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") BUILD_TIME ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S') # Linker flags for version information +# Optional minimum peer version for DHT gating can be provided via MIN_VER env/make var LDFLAGS = -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=$(VERSION) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=$(GIT_COMMIT) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.MinVer=$(MIN_VER) \ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=$(DD_API_KEY) \ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=$(DD_SITE) @@ -22,11 +25,8 @@ SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \ build: @mkdir -p release - CGO_ENABLED=1 \ - GOOS=linux \ - GOARCH=amd64 \ - echo "Building supernode..." - go build \ + @echo "Building supernode..." 
+ CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build \ -trimpath \ -ldflags="-s -w $(LDFLAGS)" \ -o release/supernode-linux-amd64 \ @@ -116,7 +116,7 @@ SETUP_SCRIPT=tests/scripts/setup-supernodes.sh # Optional: specify lumera binary path to skip download LUMERAD_BINARY ?= # Optional: specify installation mode (latest-release, latest-tag, or vX.Y.Z) -INSTALL_MODE ?=latest-tag +INSTALL_MODE ?=v1.7.2 install-lumera: @echo "Installing Lumera..." @@ -148,3 +148,30 @@ test-cascade: test-sn-manager: @echo "Running sn-manager e2e tests..." @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' . + + + +# Release command: push branch, tag, and push tag with auto-increment - this is for testing only (including releases) setup a new remote upstream or rename the script +release: + @echo "Getting current branch..." + $(eval CURRENT_BRANCH := $(shell git branch --show-current)) + @echo "Current branch: $(CURRENT_BRANCH)" + + @echo "Getting latest tag..." + $(eval LATEST_TAG := $(shell git tag -l "v*" | sort -V | tail -n1)) + $(eval NEXT_TAG := $(shell \ + if [ -z "$(LATEST_TAG)" ]; then \ + echo "v2.5.0"; \ + else \ + echo "$(LATEST_TAG)" | sed 's/^v//' | awk -F. '{print "v" $$1 "." $$2 "." $$3+1}'; \ + fi)) + @echo "Next tag: $(NEXT_TAG)" + + @echo "Pushing branch to upstream..." + git push upstream $(CURRENT_BRANCH) -f + + @echo "Creating and pushing tag $(NEXT_TAG)..." 
+ git tag $(NEXT_TAG) + git push upstream $(NEXT_TAG) + + @echo "Release complete: $(NEXT_TAG) pushed to upstream" diff --git a/README.md b/README.md index 6e152ba3..4b4da332 100644 --- a/README.md +++ b/README.md @@ -54,19 +54,12 @@ message StatusResponse { string hardware_summary = 4; // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } - message ServiceTasks { - string service_name = 1; - repeated string task_ids = 2; - int32 task_count = 3; - } - message Network { int32 peers_count = 1; // Number of connected peers in P2P network repeated string peer_addresses = 2; // List of connected peer addresses (format: "ID@IP:Port") } Resources resources = 3; - repeated ServiceTasks running_tasks = 4; // Services with currently running tasks repeated string registered_services = 5; // All registered/available services Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) diff --git a/cmd/sncli/cli/cmd_get_status.go b/cmd/sncli/cli/cmd_get_status.go index 9603089b..bb181674 100644 --- a/cmd/sncli/cli/cmd_get_status.go +++ b/cmd/sncli/cli/cmd_get_status.go @@ -20,13 +20,6 @@ func (c *CLI) GetSupernodeStatus() error { fmt.Printf(" Memory: %.2fGB used / %.2fGB total (%.2f%%)\n", resp.Resources.Memory.UsedGB, resp.Resources.Memory.TotalGB, resp.Resources.Memory.UsagePercent) - if len(resp.RunningTasks) > 0 { - fmt.Println(" Running Tasks:") - for _, service := range resp.RunningTasks { - fmt.Printf(" - %s (Tasks: %d)\n", service.ServiceName, service.TaskCount) - } - } - if len(resp.RegisteredServices) > 0 { fmt.Println(" Registered Services:") for _, svc := range resp.RegisteredServices { diff --git a/gen/dupedetection/dd-server.pb.go b/gen/dupedetection/dd-server.pb.go deleted file mode 100644 index 69e63a6d..00000000 --- a/gen/dupedetection/dd-server.pb.go +++ /dev/null @@ -1,1263 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the 
accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RarenessScoreRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ImageFilepath string `protobuf:"bytes,1,opt,name=image_filepath,json=imageFilepath,proto3" json:"image_filepath,omitempty"` - PastelBlockHashWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,4,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,5,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - 
PastelIdOfRegisteringSupernode_2 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,8,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,9,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,10,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - CollectionNameString string `protobuf:"bytes,11,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` -} - -func (x *RarenessScoreRequest) Reset() { - *x = RarenessScoreRequest{} - mi := &file_dd_server_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RarenessScoreRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RarenessScoreRequest) ProtoMessage() {} - -func (x *RarenessScoreRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RarenessScoreRequest.ProtoReflect.Descriptor instead. 
-func (*RarenessScoreRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{0} -} - -func (x *RarenessScoreRequest) GetImageFilepath() string { - if x != nil { - return x.ImageFilepath - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *RarenessScoreRequest) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *RarenessScoreRequest) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *RarenessScoreRequest) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -type ImageRarenessScoreReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PastelBlockHashWhenRequestSubmitted string 
`protobuf:"bytes,1,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,4,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,5,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - PastelIdOfRegisteringSupernode_2 string `protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,8,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - ImageFilePath string `protobuf:"bytes,9,opt,name=image_file_path,json=imageFilePath,proto3" json:"image_file_path,omitempty"` - DupeDetectionSystemVersion string `protobuf:"bytes,10,opt,name=dupe_detection_system_version,json=dupeDetectionSystemVersion,proto3" json:"dupe_detection_system_version,omitempty"` - IsLikelyDupe bool `protobuf:"varint,11,opt,name=is_likely_dupe,json=isLikelyDupe,proto3" json:"is_likely_dupe,omitempty"` - 
IsRareOnInternet bool `protobuf:"varint,12,opt,name=is_rare_on_internet,json=isRareOnInternet,proto3" json:"is_rare_on_internet,omitempty"` - OverallRarenessScore float32 `protobuf:"fixed32,13,opt,name=overall_rareness_score,json=overallRarenessScore,proto3" json:"overall_rareness_score,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 `protobuf:"fixed32,14,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_25pct,json=pctOfTop10MostSimilarWithDupeProbAbove25pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_25pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 `protobuf:"fixed32,15,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_33pct,json=pctOfTop10MostSimilarWithDupeProbAbove33pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_33pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 `protobuf:"fixed32,16,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_50pct,json=pctOfTop10MostSimilarWithDupeProbAbove50pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_50pct,omitempty"` - RarenessScoresTableJsonCompressedB64 string `protobuf:"bytes,17,opt,name=rareness_scores_table_json_compressed_b64,json=rarenessScoresTableJsonCompressedB64,proto3" json:"rareness_scores_table_json_compressed_b64,omitempty"` - InternetRareness *InternetRareness `protobuf:"bytes,18,opt,name=internet_rareness,json=internetRareness,proto3" json:"internet_rareness,omitempty"` - OpenNsfwScore float32 `protobuf:"fixed32,19,opt,name=open_nsfw_score,json=openNsfwScore,proto3" json:"open_nsfw_score,omitempty"` - AlternativeNsfwScores *AltNsfwScores `protobuf:"bytes,20,opt,name=alternative_nsfw_scores,json=alternativeNsfwScores,proto3" json:"alternative_nsfw_scores,omitempty"` - ImageFingerprintOfCandidateImageFile []float64 `protobuf:"fixed64,21,rep,packed,name=image_fingerprint_of_candidate_image_file,json=imageFingerprintOfCandidateImageFile,proto3" 
json:"image_fingerprint_of_candidate_image_file,omitempty"` - CollectionNameString string `protobuf:"bytes,22,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` - HashOfCandidateImageFile string `protobuf:"bytes,23,opt,name=hash_of_candidate_image_file,json=hashOfCandidateImageFile,proto3" json:"hash_of_candidate_image_file,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,24,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - GroupRarenessScore float32 `protobuf:"fixed32,25,opt,name=group_rareness_score,json=groupRarenessScore,proto3" json:"group_rareness_score,omitempty"` - CandidateImageThumbnailWebpAsBase64String string `protobuf:"bytes,26,opt,name=candidate_image_thumbnail_webp_as_base64_string,json=candidateImageThumbnailWebpAsBase64String,proto3" json:"candidate_image_thumbnail_webp_as_base64_string,omitempty"` - DoesNotImpactTheFollowingCollectionStrings string `protobuf:"bytes,27,opt,name=does_not_impact_the_following_collection_strings,json=doesNotImpactTheFollowingCollectionStrings,proto3" json:"does_not_impact_the_following_collection_strings,omitempty"` - IsInvalidSenseRequest bool `protobuf:"varint,28,opt,name=is_invalid_sense_request,json=isInvalidSenseRequest,proto3" json:"is_invalid_sense_request,omitempty"` - InvalidSenseRequestReason string `protobuf:"bytes,29,opt,name=invalid_sense_request_reason,json=invalidSenseRequestReason,proto3" json:"invalid_sense_request_reason,omitempty"` - SimilarityScoreToFirstEntryInCollection float32 `protobuf:"fixed32,30,opt,name=similarity_score_to_first_entry_in_collection,json=similarityScoreToFirstEntryInCollection,proto3" json:"similarity_score_to_first_entry_in_collection,omitempty"` - CpProbability float32 `protobuf:"fixed32,31,opt,name=cp_probability,json=cpProbability,proto3" json:"cp_probability,omitempty"` - ChildProbability float32 
`protobuf:"fixed32,32,opt,name=child_probability,json=childProbability,proto3" json:"child_probability,omitempty"` - ImageFingerprintSetChecksum string `protobuf:"bytes,33,opt,name=image_fingerprint_set_checksum,json=imageFingerprintSetChecksum,proto3" json:"image_fingerprint_set_checksum,omitempty"` -} - -func (x *ImageRarenessScoreReply) Reset() { - *x = ImageRarenessScoreReply{} - mi := &file_dd_server_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ImageRarenessScoreReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ImageRarenessScoreReply) ProtoMessage() {} - -func (x *ImageRarenessScoreReply) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ImageRarenessScoreReply.ProtoReflect.Descriptor instead. 
-func (*ImageRarenessScoreReply) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{1} -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetImageFilePath() string { - if x != nil { - return x.ImageFilePath - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDupeDetectionSystemVersion() string { - if x != nil { - return x.DupeDetectionSystemVersion - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsLikelyDupe() bool { - if x != nil { - return x.IsLikelyDupe - } - return false -} - -func (x *ImageRarenessScoreReply) GetIsRareOnInternet() bool { - if x != nil { - return x.IsRareOnInternet - } - return false -} - -func (x *ImageRarenessScoreReply) GetOverallRarenessScore() float32 
{ - if x != nil { - return x.OverallRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_25Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_33Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_50Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetRarenessScoresTableJsonCompressedB64() string { - if x != nil { - return x.RarenessScoresTableJsonCompressedB64 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetInternetRareness() *InternetRareness { - if x != nil { - return x.InternetRareness - } - return nil -} - -func (x *ImageRarenessScoreReply) GetOpenNsfwScore() float32 { - if x != nil { - return x.OpenNsfwScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetAlternativeNsfwScores() *AltNsfwScores { - if x != nil { - return x.AlternativeNsfwScores - } - return nil -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintOfCandidateImageFile() []float64 { - if x != nil { - return x.ImageFingerprintOfCandidateImageFile - } - return nil -} - -func (x *ImageRarenessScoreReply) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -func (x *ImageRarenessScoreReply) GetHashOfCandidateImageFile() string { - if x != nil { - return x.HashOfCandidateImageFile - } - return "" -} - -func (x *ImageRarenessScoreReply) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *ImageRarenessScoreReply) GetGroupRarenessScore() float32 { - if x != nil { - return x.GroupRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) 
GetCandidateImageThumbnailWebpAsBase64String() string { - if x != nil { - return x.CandidateImageThumbnailWebpAsBase64String - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDoesNotImpactTheFollowingCollectionStrings() string { - if x != nil { - return x.DoesNotImpactTheFollowingCollectionStrings - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsInvalidSenseRequest() bool { - if x != nil { - return x.IsInvalidSenseRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetInvalidSenseRequestReason() string { - if x != nil { - return x.InvalidSenseRequestReason - } - return "" -} - -func (x *ImageRarenessScoreReply) GetSimilarityScoreToFirstEntryInCollection() float32 { - if x != nil { - return x.SimilarityScoreToFirstEntryInCollection - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetCpProbability() float32 { - if x != nil { - return x.CpProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetChildProbability() float32 { - if x != nil { - return x.ChildProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintSetChecksum() string { - if x != nil { - return x.ImageFingerprintSetChecksum - } - return "" -} - -type InternetRareness struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RareOnInternetSummaryTableAsJsonCompressedB64 string `protobuf:"bytes,1,opt,name=rare_on_internet_summary_table_as_json_compressed_b64,json=rareOnInternetSummaryTableAsJsonCompressedB64,proto3" json:"rare_on_internet_summary_table_as_json_compressed_b64,omitempty"` - RareOnInternetGraphJsonCompressedB64 string `protobuf:"bytes,2,opt,name=rare_on_internet_graph_json_compressed_b64,json=rareOnInternetGraphJsonCompressedB64,proto3" json:"rare_on_internet_graph_json_compressed_b64,omitempty"` - AlternativeRareOnInternetDictAsJsonCompressedB64 string 
`protobuf:"bytes,3,opt,name=alternative_rare_on_internet_dict_as_json_compressed_b64,json=alternativeRareOnInternetDictAsJsonCompressedB64,proto3" json:"alternative_rare_on_internet_dict_as_json_compressed_b64,omitempty"` - MinNumberOfExactMatchesInPage uint32 `protobuf:"varint,4,opt,name=min_number_of_exact_matches_in_page,json=minNumberOfExactMatchesInPage,proto3" json:"min_number_of_exact_matches_in_page,omitempty"` - EarliestAvailableDateOfInternetResults string `protobuf:"bytes,5,opt,name=earliest_available_date_of_internet_results,json=earliestAvailableDateOfInternetResults,proto3" json:"earliest_available_date_of_internet_results,omitempty"` -} - -func (x *InternetRareness) Reset() { - *x = InternetRareness{} - mi := &file_dd_server_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *InternetRareness) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InternetRareness) ProtoMessage() {} - -func (x *InternetRareness) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InternetRareness.ProtoReflect.Descriptor instead. 
-func (*InternetRareness) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{2} -} - -func (x *InternetRareness) GetRareOnInternetSummaryTableAsJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetSummaryTableAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetRareOnInternetGraphJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetGraphJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetAlternativeRareOnInternetDictAsJsonCompressedB64() string { - if x != nil { - return x.AlternativeRareOnInternetDictAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetMinNumberOfExactMatchesInPage() uint32 { - if x != nil { - return x.MinNumberOfExactMatchesInPage - } - return 0 -} - -func (x *InternetRareness) GetEarliestAvailableDateOfInternetResults() string { - if x != nil { - return x.EarliestAvailableDateOfInternetResults - } - return "" -} - -type AltNsfwScores struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Drawings float32 `protobuf:"fixed32,1,opt,name=drawings,proto3" json:"drawings,omitempty"` - Hentai float32 `protobuf:"fixed32,2,opt,name=hentai,proto3" json:"hentai,omitempty"` - Neutral float32 `protobuf:"fixed32,3,opt,name=neutral,proto3" json:"neutral,omitempty"` - Porn float32 `protobuf:"fixed32,4,opt,name=porn,proto3" json:"porn,omitempty"` - Sexy float32 `protobuf:"fixed32,5,opt,name=sexy,proto3" json:"sexy,omitempty"` -} - -func (x *AltNsfwScores) Reset() { - *x = AltNsfwScores{} - mi := &file_dd_server_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AltNsfwScores) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AltNsfwScores) ProtoMessage() {} - -func (x *AltNsfwScores) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[3] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AltNsfwScores.ProtoReflect.Descriptor instead. -func (*AltNsfwScores) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{3} -} - -func (x *AltNsfwScores) GetDrawings() float32 { - if x != nil { - return x.Drawings - } - return 0 -} - -func (x *AltNsfwScores) GetHentai() float32 { - if x != nil { - return x.Hentai - } - return 0 -} - -func (x *AltNsfwScores) GetNeutral() float32 { - if x != nil { - return x.Neutral - } - return 0 -} - -func (x *AltNsfwScores) GetPorn() float32 { - if x != nil { - return x.Porn - } - return 0 -} - -func (x *AltNsfwScores) GetSexy() float32 { - if x != nil { - return x.Sexy - } - return 0 -} - -type GetStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetStatusRequest) Reset() { - *x = GetStatusRequest{} - mi := &file_dd_server_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusRequest) ProtoMessage() {} - -func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusRequest.ProtoReflect.Descriptor instead. 
-func (*GetStatusRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{4} -} - -type TaskCount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MaxConcurrent int32 `protobuf:"varint,1,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - Executing int32 `protobuf:"varint,2,opt,name=executing,proto3" json:"executing,omitempty"` - WaitingInQueue int32 `protobuf:"varint,3,opt,name=waiting_in_queue,json=waitingInQueue,proto3" json:"waiting_in_queue,omitempty"` - Succeeded int32 `protobuf:"varint,4,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - Failed int32 `protobuf:"varint,5,opt,name=failed,proto3" json:"failed,omitempty"` - Cancelled int32 `protobuf:"varint,6,opt,name=cancelled,proto3" json:"cancelled,omitempty"` -} - -func (x *TaskCount) Reset() { - *x = TaskCount{} - mi := &file_dd_server_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskCount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskCount) ProtoMessage() {} - -func (x *TaskCount) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskCount.ProtoReflect.Descriptor instead. 
-func (*TaskCount) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{5} -} - -func (x *TaskCount) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *TaskCount) GetExecuting() int32 { - if x != nil { - return x.Executing - } - return 0 -} - -func (x *TaskCount) GetWaitingInQueue() int32 { - if x != nil { - return x.WaitingInQueue - } - return 0 -} - -func (x *TaskCount) GetSucceeded() int32 { - if x != nil { - return x.Succeeded - } - return 0 -} - -func (x *TaskCount) GetFailed() int32 { - if x != nil { - return x.Failed - } - return 0 -} - -func (x *TaskCount) GetCancelled() int32 { - if x != nil { - return x.Cancelled - } - return 0 -} - -type TaskMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AverageTaskWaitTimeSecs float32 `protobuf:"fixed32,1,opt,name=average_task_wait_time_secs,json=averageTaskWaitTimeSecs,proto3" json:"average_task_wait_time_secs,omitempty"` - MaxTaskWaitTimeSecs float32 `protobuf:"fixed32,2,opt,name=max_task_wait_time_secs,json=maxTaskWaitTimeSecs,proto3" json:"max_task_wait_time_secs,omitempty"` - AverageTaskExecutionTimeSecs float32 `protobuf:"fixed32,3,opt,name=average_task_execution_time_secs,json=averageTaskExecutionTimeSecs,proto3" json:"average_task_execution_time_secs,omitempty"` - AverageTaskVirtualMemoryUsageBytes int64 `protobuf:"varint,4,opt,name=average_task_virtual_memory_usage_bytes,json=averageTaskVirtualMemoryUsageBytes,proto3" json:"average_task_virtual_memory_usage_bytes,omitempty"` - AverageTaskRssMemoryUsageBytes int64 `protobuf:"varint,5,opt,name=average_task_rss_memory_usage_bytes,json=averageTaskRssMemoryUsageBytes,proto3" json:"average_task_rss_memory_usage_bytes,omitempty"` - PeakTaskRssMemoryUsageBytes int64 `protobuf:"varint,6,opt,name=peak_task_rss_memory_usage_bytes,json=peakTaskRssMemoryUsageBytes,proto3" json:"peak_task_rss_memory_usage_bytes,omitempty"` - 
PeakTaskVmsMemoryUsageBytes int64 `protobuf:"varint,7,opt,name=peak_task_vms_memory_usage_bytes,json=peakTaskVmsMemoryUsageBytes,proto3" json:"peak_task_vms_memory_usage_bytes,omitempty"` -} - -func (x *TaskMetrics) Reset() { - *x = TaskMetrics{} - mi := &file_dd_server_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskMetrics) ProtoMessage() {} - -func (x *TaskMetrics) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskMetrics.ProtoReflect.Descriptor instead. -func (*TaskMetrics) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{6} -} - -func (x *TaskMetrics) GetAverageTaskWaitTimeSecs() float32 { - if x != nil { - return x.AverageTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetMaxTaskWaitTimeSecs() float32 { - if x != nil { - return x.MaxTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskExecutionTimeSecs() float32 { - if x != nil { - return x.AverageTaskExecutionTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskVirtualMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskVirtualMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskVmsMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskVmsMemoryUsageBytes - } - return 0 -} - -type GetStatusResponse struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - TaskCount *TaskCount `protobuf:"bytes,2,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` - TaskMetrics *TaskMetrics `protobuf:"bytes,3,opt,name=task_metrics,json=taskMetrics,proto3" json:"task_metrics,omitempty"` -} - -func (x *GetStatusResponse) Reset() { - *x = GetStatusResponse{} - mi := &file_dd_server_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusResponse) ProtoMessage() {} - -func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusResponse.ProtoReflect.Descriptor instead. 
-func (*GetStatusResponse) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{7} -} - -func (x *GetStatusResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *GetStatusResponse) GetTaskCount() *TaskCount { - if x != nil { - return x.TaskCount - } - return nil -} - -func (x *GetStatusResponse) GetTaskMetrics() *TaskMetrics { - if x != nil { - return x.TaskMetrics - } - return nil -} - -var File_dd_server_proto protoreflect.FileDescriptor - -var file_dd_server_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x64, 0x64, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0d, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x8a, 0x06, 0x0a, 0x14, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x25, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x20, 0x75, 0x74, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, - 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, - 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, - 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, - 
0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, - 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, - 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0xcd, 0x12, - 0x0a, 0x17, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, - 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x25, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, - 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x75, 0x74, 0x63, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, - 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, - 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, - 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 
0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, - 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, - 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, - 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, - 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, - 0x12, 0x41, 0x0a, 0x1d, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x64, 0x75, 0x70, 0x65, 0x44, 0x65, 0x74, - 0x65, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x6b, 0x65, 0x6c, 0x79, - 0x5f, 0x64, 0x75, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x4c, - 0x69, 0x6b, 0x65, 0x6c, 0x79, 0x44, 0x75, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, - 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x76, 0x65, 0x72, - 0x61, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, - 0x6c, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x6a, - 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, - 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, - 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, - 0x65, 0x5f, 0x32, 0x35, 0x70, 0x63, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, - 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, - 0x41, 0x62, 0x6f, 0x76, 0x65, 0x32, 0x35, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, - 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, - 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, - 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x33, 0x33, - 0x70, 0x63, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 
0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, - 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, - 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, - 0x65, 0x33, 0x33, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, - 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x35, 0x30, 0x70, 0x63, 0x74, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, - 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, - 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, 0x65, 0x35, 0x30, 0x70, - 0x63, 0x74, 0x12, 0x57, 0x0a, 0x29, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4c, 0x0a, 0x11, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, - 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 
0x70, 0x65, - 0x6e, 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, - 0x65, 0x12, 0x54, 0x0a, 0x17, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x74, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x52, 0x15, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x73, 0x66, - 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x29, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x6f, 0x66, 0x5f, - 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15, 0x20, 0x03, 0x28, 0x01, 0x52, 0x24, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x66, 0x43, 0x61, - 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3e, 0x0a, 0x1c, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6f, - 0x66, 0x5f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x68, 0x61, - 0x73, 0x68, 0x4f, 0x66, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x46, 0x69, 
0x6c, 0x65, 0x12, 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, - 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, - 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x30, - 0x0a, 0x14, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, - 0x12, 0x62, 0x0a, 0x2f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, 0x69, 0x6c, 0x5f, 0x77, 0x65, - 0x62, 0x70, 0x5f, 0x61, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x36, 0x34, 0x5f, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x29, 0x63, 0x61, 0x6e, 0x64, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, - 0x69, 0x6c, 0x57, 0x65, 0x62, 0x70, 0x41, 0x73, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x30, 0x64, 0x6f, 0x65, 0x73, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x69, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x68, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x6c, - 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x2a, - 0x64, 0x6f, 0x65, 0x73, 0x4e, 0x6f, 0x74, 0x49, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x54, 0x68, 0x65, - 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x73, - 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, 
0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x73, - 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, - 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x69, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x2d, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x27, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x46, 0x69, - 0x72, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0d, 0x63, 0x70, - 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x1e, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x65, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, - 
0x52, 0x1b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0xf7, 0x03, - 0x0a, 0x10, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, - 0x73, 0x73, 0x12, 0x6c, 0x0a, 0x35, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x2d, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x73, 0x4a, - 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, - 0x12, 0x58, 0x0a, 0x2a, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x72, 0x0a, 0x38, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x63, 0x74, 0x5f, - 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x30, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 
0x69, 0x76, 0x65, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x44, 0x69, 0x63, 0x74, 0x41, 0x73, 0x4a, 0x73, 0x6f, - 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4a, - 0x0a, 0x23, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, - 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x69, 0x6e, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1d, 0x6d, 0x69, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x12, 0x5b, 0x0a, 0x2b, 0x65, 0x61, - 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x26, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x44, 0x61, 0x74, 0x65, 0x4f, 0x66, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x0d, 0x41, 0x6c, 0x74, 0x4e, - 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x12, 0x18, 0x0a, - 0x07, 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, - 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x12, 
0x12, 0x0a, 0x04, 0x73, - 0x65, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x73, 0x65, 0x78, 0x79, 0x22, - 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x43, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x5f, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x6c, 0x65, 0x64, 0x22, 0xf9, 0x03, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x3c, 0x0a, 0x1b, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x17, 0x61, 0x76, 0x65, 0x72, 0x61, - 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x63, 0x73, 
0x12, 0x34, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x76, 0x65, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x1c, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, - 0x12, 0x53, 0x0a, 0x27, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x22, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x69, - 0x72, 0x74, 0x75, 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x23, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x1e, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 
0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, - 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, - 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x6d, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, - 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x6d, 0x73, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x37, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x09, - 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x73, - 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0xc8, 0x01, 0x0a, 0x13, 0x44, 0x75, 0x70, - 0x65, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x61, 0x0a, 0x12, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, - 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x23, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 
0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x64, 0x75, - 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x20, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x64, - 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_dd_server_proto_rawDescOnce sync.Once - file_dd_server_proto_rawDescData = file_dd_server_proto_rawDesc -) - -func file_dd_server_proto_rawDescGZIP() []byte { - file_dd_server_proto_rawDescOnce.Do(func() { - file_dd_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_dd_server_proto_rawDescData) - }) - return file_dd_server_proto_rawDescData -} - -var file_dd_server_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_dd_server_proto_goTypes = []any{ - (*RarenessScoreRequest)(nil), // 0: dupedetection.RarenessScoreRequest - (*ImageRarenessScoreReply)(nil), // 1: dupedetection.ImageRarenessScoreReply - (*InternetRareness)(nil), // 2: 
dupedetection.InternetRareness - (*AltNsfwScores)(nil), // 3: dupedetection.AltNsfwScores - (*GetStatusRequest)(nil), // 4: dupedetection.GetStatusRequest - (*TaskCount)(nil), // 5: dupedetection.TaskCount - (*TaskMetrics)(nil), // 6: dupedetection.TaskMetrics - (*GetStatusResponse)(nil), // 7: dupedetection.GetStatusResponse -} -var file_dd_server_proto_depIdxs = []int32{ - 2, // 0: dupedetection.ImageRarenessScoreReply.internet_rareness:type_name -> dupedetection.InternetRareness - 3, // 1: dupedetection.ImageRarenessScoreReply.alternative_nsfw_scores:type_name -> dupedetection.AltNsfwScores - 5, // 2: dupedetection.GetStatusResponse.task_count:type_name -> dupedetection.TaskCount - 6, // 3: dupedetection.GetStatusResponse.task_metrics:type_name -> dupedetection.TaskMetrics - 0, // 4: dupedetection.DupeDetectionServer.ImageRarenessScore:input_type -> dupedetection.RarenessScoreRequest - 4, // 5: dupedetection.DupeDetectionServer.GetStatus:input_type -> dupedetection.GetStatusRequest - 1, // 6: dupedetection.DupeDetectionServer.ImageRarenessScore:output_type -> dupedetection.ImageRarenessScoreReply - 7, // 7: dupedetection.DupeDetectionServer.GetStatus:output_type -> dupedetection.GetStatusResponse - 6, // [6:8] is the sub-list for method output_type - 4, // [4:6] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_dd_server_proto_init() } -func file_dd_server_proto_init() { - if File_dd_server_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_dd_server_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_dd_server_proto_goTypes, - DependencyIndexes: file_dd_server_proto_depIdxs, - MessageInfos: 
file_dd_server_proto_msgTypes, - }.Build() - File_dd_server_proto = out.File - file_dd_server_proto_rawDesc = nil - file_dd_server_proto_goTypes = nil - file_dd_server_proto_depIdxs = nil -} diff --git a/gen/dupedetection/dd-server_grpc.pb.go b/gen/dupedetection/dd-server_grpc.pb.go deleted file mode 100644 index 27ee79bf..00000000 --- a/gen/dupedetection/dd-server_grpc.pb.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - DupeDetectionServer_ImageRarenessScore_FullMethodName = "/dupedetection.DupeDetectionServer/ImageRarenessScore" - DupeDetectionServer_GetStatus_FullMethodName = "/dupedetection.DupeDetectionServer/GetStatus" -) - -// DupeDetectionServerClient is the client API for DupeDetectionServer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type DupeDetectionServerClient interface { - ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) - GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) -} - -type dupeDetectionServerClient struct { - cc grpc.ClientConnInterface -} - -func NewDupeDetectionServerClient(cc grpc.ClientConnInterface) DupeDetectionServerClient { - return &dupeDetectionServerClient{cc} -} - -func (c *dupeDetectionServerClient) ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) { - out := new(ImageRarenessScoreReply) - err := c.cc.Invoke(ctx, DupeDetectionServer_ImageRarenessScore_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dupeDetectionServerClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) { - out := new(GetStatusResponse) - err := c.cc.Invoke(ctx, DupeDetectionServer_GetStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DupeDetectionServerServer is the server API for DupeDetectionServer service. -// All implementations must embed UnimplementedDupeDetectionServerServer -// for forward compatibility -type DupeDetectionServerServer interface { - ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) - GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) - mustEmbedUnimplementedDupeDetectionServerServer() -} - -// UnimplementedDupeDetectionServerServer must be embedded to have forward compatible implementations. 
-type UnimplementedDupeDetectionServerServer struct { -} - -func (UnimplementedDupeDetectionServerServer) ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImageRarenessScore not implemented") -} -func (UnimplementedDupeDetectionServerServer) GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") -} -func (UnimplementedDupeDetectionServerServer) mustEmbedUnimplementedDupeDetectionServerServer() {} - -// UnsafeDupeDetectionServerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DupeDetectionServerServer will -// result in compilation errors. -type UnsafeDupeDetectionServerServer interface { - mustEmbedUnimplementedDupeDetectionServerServer() -} - -func RegisterDupeDetectionServerServer(s grpc.ServiceRegistrar, srv DupeDetectionServerServer) { - s.RegisterService(&DupeDetectionServer_ServiceDesc, srv) -} - -func _DupeDetectionServer_ImageRarenessScore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RarenessScoreRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_ImageRarenessScore_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, req.(*RarenessScoreRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _DupeDetectionServer_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(GetStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).GetStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_GetStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).GetStatus(ctx, req.(*GetStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// DupeDetectionServer_ServiceDesc is the grpc.ServiceDesc for DupeDetectionServer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var DupeDetectionServer_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "dupedetection.DupeDetectionServer", - HandlerType: (*DupeDetectionServerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ImageRarenessScore", - Handler: _DupeDetectionServer_ImageRarenessScore_Handler, - }, - { - MethodName: "GetStatus", - Handler: _DupeDetectionServer_GetStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "dd-server.proto", -} diff --git a/gen/raptorq/raptorq.pb.go b/gen/raptorq/raptorq.pb.go deleted file mode 100644 index 8c9ba9d0..00000000 --- a/gen/raptorq/raptorq.pb.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type EncodeMetaDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - FilesNumber uint32 `protobuf:"varint,2,opt,name=files_number,json=filesNumber,proto3" json:"files_number,omitempty"` - BlockHash string `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - PastelId string `protobuf:"bytes,4,opt,name=pastel_id,json=pastelId,proto3" json:"pastel_id,omitempty"` -} - -func (x *EncodeMetaDataRequest) Reset() { - *x = EncodeMetaDataRequest{} - mi := &file_raptorq_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataRequest) ProtoMessage() {} - -func (x *EncodeMetaDataRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataRequest.ProtoReflect.Descriptor instead. 
-func (*EncodeMetaDataRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{0} -} - -func (x *EncodeMetaDataRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *EncodeMetaDataRequest) GetFilesNumber() uint32 { - if x != nil { - return x.FilesNumber - } - return 0 -} - -func (x *EncodeMetaDataRequest) GetBlockHash() string { - if x != nil { - return x.BlockHash - } - return "" -} - -func (x *EncodeMetaDataRequest) GetPastelId() string { - if x != nil { - return x.PastelId - } - return "" -} - -type EncodeMetaDataReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeMetaDataReply) Reset() { - *x = EncodeMetaDataReply{} - mi := &file_raptorq_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataReply) ProtoMessage() {} - -func (x *EncodeMetaDataReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataReply.ProtoReflect.Descriptor instead. 
-func (*EncodeMetaDataReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{1} -} - -func (x *EncodeMetaDataReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeMetaDataReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeMetaDataReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeRequest) Reset() { - *x = EncodeRequest{} - mi := &file_raptorq_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeRequest) ProtoMessage() {} - -func (x *EncodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeRequest.ProtoReflect.Descriptor instead. 
-func (*EncodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{2} -} - -func (x *EncodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeReply) Reset() { - *x = EncodeReply{} - mi := &file_raptorq_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeReply) ProtoMessage() {} - -func (x *EncodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeReply.ProtoReflect.Descriptor instead. 
-func (*EncodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{3} -} - -func (x *EncodeReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeRequest) Reset() { - *x = DecodeRequest{} - mi := &file_raptorq_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeRequest) ProtoMessage() {} - -func (x *DecodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeRequest.ProtoReflect.Descriptor instead. 
-func (*DecodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{4} -} - -func (x *DecodeRequest) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *DecodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeReply) Reset() { - *x = DecodeReply{} - mi := &file_raptorq_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeReply) ProtoMessage() {} - -func (x *DecodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeReply.ProtoReflect.Descriptor instead. 
-func (*DecodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{5} -} - -func (x *DecodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_raptorq_proto protoreflect.FileDescriptor - -var file_raptorq_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x22, 0x8a, 0x01, 0x0a, 0x15, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x74, - 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x22, 0x7d, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, - 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, - 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x23, 0x0a, 0x0d, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x75, 0x0a, 0x0b, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6d, 0x62, 0x6f, - 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, - 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x22, 0x52, 0x0a, 0x0d, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x21, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x32, 0xc9, 0x01, 0x0a, 0x07, 0x52, 0x61, 0x70, 0x74, - 0x6f, 0x72, 0x51, 0x12, 0x4e, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x72, 0x61, 0x70, 
0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x44, - 0x65, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x72, - 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_raptorq_proto_rawDescOnce sync.Once - file_raptorq_proto_rawDescData = file_raptorq_proto_rawDesc -) - -func file_raptorq_proto_rawDescGZIP() []byte { - file_raptorq_proto_rawDescOnce.Do(func() { - file_raptorq_proto_rawDescData = protoimpl.X.CompressGZIP(file_raptorq_proto_rawDescData) - }) - return file_raptorq_proto_rawDescData -} - -var file_raptorq_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_raptorq_proto_goTypes = []any{ - (*EncodeMetaDataRequest)(nil), // 0: raptorq.EncodeMetaDataRequest - (*EncodeMetaDataReply)(nil), // 1: raptorq.EncodeMetaDataReply - 
(*EncodeRequest)(nil), // 2: raptorq.EncodeRequest - (*EncodeReply)(nil), // 3: raptorq.EncodeReply - (*DecodeRequest)(nil), // 4: raptorq.DecodeRequest - (*DecodeReply)(nil), // 5: raptorq.DecodeReply -} -var file_raptorq_proto_depIdxs = []int32{ - 0, // 0: raptorq.RaptorQ.EncodeMetaData:input_type -> raptorq.EncodeMetaDataRequest - 2, // 1: raptorq.RaptorQ.Encode:input_type -> raptorq.EncodeRequest - 4, // 2: raptorq.RaptorQ.Decode:input_type -> raptorq.DecodeRequest - 1, // 3: raptorq.RaptorQ.EncodeMetaData:output_type -> raptorq.EncodeMetaDataReply - 3, // 4: raptorq.RaptorQ.Encode:output_type -> raptorq.EncodeReply - 5, // 5: raptorq.RaptorQ.Decode:output_type -> raptorq.DecodeReply - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_raptorq_proto_init() } -func file_raptorq_proto_init() { - if File_raptorq_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_raptorq_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_raptorq_proto_goTypes, - DependencyIndexes: file_raptorq_proto_depIdxs, - MessageInfos: file_raptorq_proto_msgTypes, - }.Build() - File_raptorq_proto = out.File - file_raptorq_proto_rawDesc = nil - file_raptorq_proto_goTypes = nil - file_raptorq_proto_depIdxs = nil -} diff --git a/gen/raptorq/raptorq_grpc.pb.go b/gen/raptorq/raptorq_grpc.pb.go deleted file mode 100644 index 01c17ae8..00000000 --- a/gen/raptorq/raptorq_grpc.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or 
http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - RaptorQ_EncodeMetaData_FullMethodName = "/raptorq.RaptorQ/EncodeMetaData" - RaptorQ_Encode_FullMethodName = "/raptorq.RaptorQ/Encode" - RaptorQ_Decode_FullMethodName = "/raptorq.RaptorQ/Decode" -) - -// RaptorQClient is the client API for RaptorQ service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type RaptorQClient interface { - EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) - Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) - Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) -} - -type raptorQClient struct { - cc grpc.ClientConnInterface -} - -func NewRaptorQClient(cc grpc.ClientConnInterface) RaptorQClient { - return &raptorQClient{cc} -} - -func (c *raptorQClient) EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) { - out := new(EncodeMetaDataReply) - err := c.cc.Invoke(ctx, RaptorQ_EncodeMetaData_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) { - out := new(EncodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Encode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) { - out := new(DecodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Decode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// RaptorQServer is the server API for RaptorQ service. -// All implementations must embed UnimplementedRaptorQServer -// for forward compatibility -type RaptorQServer interface { - EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) - Encode(context.Context, *EncodeRequest) (*EncodeReply, error) - Decode(context.Context, *DecodeRequest) (*DecodeReply, error) - mustEmbedUnimplementedRaptorQServer() -} - -// UnimplementedRaptorQServer must be embedded to have forward compatible implementations. -type UnimplementedRaptorQServer struct { -} - -func (UnimplementedRaptorQServer) EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method EncodeMetaData not implemented") -} -func (UnimplementedRaptorQServer) Encode(context.Context, *EncodeRequest) (*EncodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Encode not implemented") -} -func (UnimplementedRaptorQServer) Decode(context.Context, *DecodeRequest) (*DecodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Decode not implemented") -} -func (UnimplementedRaptorQServer) mustEmbedUnimplementedRaptorQServer() {} - -// UnsafeRaptorQServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to RaptorQServer will -// result in compilation errors. -type UnsafeRaptorQServer interface { - mustEmbedUnimplementedRaptorQServer() -} - -func RegisterRaptorQServer(s grpc.ServiceRegistrar, srv RaptorQServer) { - s.RegisterService(&RaptorQ_ServiceDesc, srv) -} - -func _RaptorQ_EncodeMetaData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeMetaDataRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).EncodeMetaData(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_EncodeMetaData_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).EncodeMetaData(ctx, req.(*EncodeMetaDataRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Encode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Encode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_Encode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Encode(ctx, req.(*EncodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Decode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DecodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Decode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
RaptorQ_Decode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Decode(ctx, req.(*DecodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// RaptorQ_ServiceDesc is the grpc.ServiceDesc for RaptorQ service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var RaptorQ_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "raptorq.RaptorQ", - HandlerType: (*RaptorQServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EncodeMetaData", - Handler: _RaptorQ_EncodeMetaData_Handler, - }, - { - MethodName: "Encode", - Handler: _RaptorQ_Encode_Handler, - }, - { - MethodName: "Decode", - Handler: _RaptorQ_Decode_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "raptorq.proto", -} diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go index dd083d04..f270a051 100644 --- a/gen/supernode/action/cascade/service.pb.go +++ b/gen/supernode/action/cascade/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/action/cascade/service.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -116,15 +117,14 @@ func (SupernodeEventType) EnumDescriptor() ([]byte, []int) { } type RegisterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to RequestType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to RequestType: // // *RegisterRequest_Chunk // *RegisterRequest_Metadata - RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RegisterRequest) Reset() { @@ -157,23 +157,27 @@ func (*RegisterRequest) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} } -func (m *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { - if m != nil { - return m.RequestType +func (x *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { + if x != nil { + return x.RequestType } return nil } func (x *RegisterRequest) GetChunk() *DataChunk { - if x, ok := x.GetRequestType().(*RegisterRequest_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Chunk); ok { + return x.Chunk + } } return nil } func (x *RegisterRequest) GetMetadata() *Metadata { - if x, ok := x.GetRequestType().(*RegisterRequest_Metadata); ok { - return x.Metadata + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Metadata); ok { + return x.Metadata + } } return nil } @@ -195,11 +199,10 @@ func (*RegisterRequest_Chunk) isRegisterRequest_RequestType() {} func (*RegisterRequest_Metadata) 
isRegisterRequest_RequestType() {} type DataChunk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DataChunk) Reset() { @@ -240,12 +243,11 @@ func (x *DataChunk) GetData() []byte { } type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { @@ -293,13 +295,12 @@ func (x *Metadata) GetActionId() string { } type RegisterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` unknownFields protoimpl.UnknownFields - - EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - TxHash string 
`protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RegisterResponse) Reset() { @@ -354,12 +355,11 @@ func (x *RegisterResponse) GetTxHash() string { } type DownloadRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` unknownFields protoimpl.UnknownFields - - ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` - Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadRequest) Reset() { @@ -407,15 +407,14 @@ func (x *DownloadRequest) GetSignature() string { } type DownloadResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ResponseType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ResponseType: // // *DownloadResponse_Event // *DownloadResponse_Chunk - ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DownloadResponse) Reset() { @@ -448,23 +447,27 @@ func (*DownloadResponse) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{5} } -func (m *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { - if m != nil { - return m.ResponseType +func (x *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { + if x != nil { + return x.ResponseType } return nil } func (x 
*DownloadResponse) GetEvent() *DownloadEvent { - if x, ok := x.GetResponseType().(*DownloadResponse_Event); ok { - return x.Event + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Event); ok { + return x.Event + } } return nil } func (x *DownloadResponse) GetChunk() *DataChunk { - if x, ok := x.GetResponseType().(*DownloadResponse_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Chunk); ok { + return x.Chunk + } } return nil } @@ -486,12 +489,11 @@ func (*DownloadResponse_Event) isDownloadResponse_ResponseType() {} func (*DownloadResponse_Chunk) isDownloadResponse_ResponseType() {} type DownloadEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` unknownFields protoimpl.UnknownFields - - EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadEvent) Reset() { @@ -540,104 +542,66 @@ func (x *DownloadEvent) GetMessage() string { var File_supernode_action_cascade_service_proto protoreflect.FileDescriptor -var file_supernode_action_cascade_service_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, - 0x65, 0x22, 0x7e, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 
0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x12, 0x2f, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x0e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x1f, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x40, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, - 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 
0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x22, 0x4c, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x7f, 0x0a, 0x10, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, - 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x0f, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x65, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x2a, 0xb3, - 0x03, 0x0a, 0x12, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x54, - 0x52, 0x49, 0x45, 0x56, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x46, 0x45, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x4f, 0x50, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x4e, 0x4f, - 0x44, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x44, 0x45, - 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, - 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, - 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, - 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x51, - 0x49, 0x44, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x11, - 0x0a, 0x0d, 0x52, 0x51, 0x49, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x09, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, - 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x52, 0x54, - 0x45, 0x46, 0x41, 0x43, 0x54, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x44, 0x10, 0x0b, 0x12, - 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, - 0x5a, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x52, 0x54, 0x45, 0x46, 0x41, 0x43, - 0x54, 0x53, 0x5f, 
0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x0d, 0x12, - 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, 0x4d, 0x55, - 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0e, 0x12, - 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, - 0x45, 0x56, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0f, 0x12, 0x14, 0x0a, - 0x10, 0x44, 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x45, 0x52, 0x56, 0x45, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x59, 0x10, 0x11, 0x32, 0x98, 0x01, 0x0a, 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x08, - 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, - 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, - 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_supernode_action_cascade_service_proto_rawDesc = "" + + "\n" + + "&supernode/action/cascade/service.proto\x12\acascade\"~\n" + + "\x0fRegisterRequest\x12*\n" + + "\x05chunk\x18\x01 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunk\x12/\n" + + "\bmetadata\x18\x02 \x01(\v2\x11.cascade.MetadataH\x00R\bmetadataB\x0e\n" + + "\frequest_type\"\x1f\n" + + "\tDataChunk\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"@\n" + + "\bMetadata\x12\x17\n" + + "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + + "\taction_id\x18\x02 \x01(\tR\bactionId\"\x81\x01\n" + + "\x10RegisterResponse\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12\x17\n" + + "\atx_hash\x18\x03 \x01(\tR\x06txHash\"L\n" + + "\x0fDownloadRequest\x12\x1b\n" + + "\taction_id\x18\x01 \x01(\tR\bactionId\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\tR\tsignature\"\x7f\n" + + "\x10DownloadResponse\x12.\n" + + "\x05event\x18\x01 \x01(\v2\x16.cascade.DownloadEventH\x00R\x05event\x12*\n" + + "\x05chunk\x18\x02 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunkB\x0f\n" + + "\rresponse_type\"e\n" + + "\rDownloadEvent\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage*\xb3\x03\n" + + "\x12SupernodeEventType\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\x14\n" + + "\x10ACTION_RETRIEVED\x10\x01\x12\x17\n" + + "\x13ACTION_FEE_VERIFIED\x10\x02\x12\x1e\n" + + "\x1aTOP_SUPERNODE_CHECK_PASSED\x10\x03\x12\x14\n" + + "\x10METADATA_DECODED\x10\x04\x12\x16\n" + + "\x12DATA_HASH_VERIFIED\x10\x05\x12\x11\n" + + "\rINPUT_ENCODED\x10\x06\x12\x16\n" + + "\x12SIGNATURE_VERIFIED\x10\a\x12\x12\n" + + "\x0eRQID_GENERATED\x10\b\x12\x11\n" + + "\rRQID_VERIFIED\x10\t\x12\x16\n" + + "\x12FINALIZE_SIMULATED\x10\n" + + "\x12\x14\n" + 
+ "\x10ARTEFACTS_STORED\x10\v\x12\x14\n" + + "\x10ACTION_FINALIZED\x10\f\x12\x18\n" + + "\x14ARTEFACTS_DOWNLOADED\x10\r\x12\x1e\n" + + "\x1aFINALIZE_SIMULATION_FAILED\x10\x0e\x12\x1c\n" + + "\x18NETWORK_RETRIEVE_STARTED\x10\x0f\x12\x14\n" + + "\x10DECODE_COMPLETED\x10\x10\x12\x0f\n" + + "\vSERVE_READY\x10\x112\x98\x01\n" + + "\x0eCascadeService\x12C\n" + + "\bRegister\x12\x18.cascade.RegisterRequest\x1a\x19.cascade.RegisterResponse(\x010\x01\x12A\n" + + "\bDownload\x12\x18.cascade.DownloadRequest\x1a\x19.cascade.DownloadResponse0\x01BEZCgithub.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascadeb\x06proto3" var ( file_supernode_action_cascade_service_proto_rawDescOnce sync.Once - file_supernode_action_cascade_service_proto_rawDescData = file_supernode_action_cascade_service_proto_rawDesc + file_supernode_action_cascade_service_proto_rawDescData []byte ) func file_supernode_action_cascade_service_proto_rawDescGZIP() []byte { file_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { - file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_action_cascade_service_proto_rawDescData) + file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc))) }) return file_supernode_action_cascade_service_proto_rawDescData } @@ -689,7 +653,7 @@ func file_supernode_action_cascade_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_action_cascade_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc)), NumEnums: 1, NumMessages: 7, NumExtensions: 0, @@ -701,7 +665,6 @@ func file_supernode_action_cascade_service_proto_init() { 
MessageInfos: file_supernode_action_cascade_service_proto_msgTypes, }.Build() File_supernode_action_cascade_service_proto = out.File - file_supernode_action_cascade_service_proto_rawDesc = nil file_supernode_action_cascade_service_proto_goTypes = nil file_supernode_action_cascade_service_proto_depIdxs = nil } diff --git a/gen/supernode/agents/.gitkeep b/gen/supernode/agents/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index b8399095..ad1ff814 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/service.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,9 +23,9 @@ const ( ) type ListServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServicesRequest) Reset() { @@ -58,12 +59,11 @@ func (*ListServicesRequest) Descriptor() ([]byte, []int) { } type ListServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` unknownFields protoimpl.UnknownFields - - Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListServicesResponse) Reset() { @@ -111,12 +111,11 @@ func (x *ListServicesResponse) GetCount() int32 { } 
type ServiceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceInfo) Reset() { @@ -163,77 +162,228 @@ func (x *ServiceInfo) GetMethods() []string { return nil } -var File_supernode_service_proto protoreflect.FileDescriptor +// Raw pprof request/response messages +type RawPprofRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofRequest) Reset() { + *x = RawPprofRequest{} + mi := &file_supernode_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawPprofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawPprofRequest) ProtoMessage() {} + +func (x *RawPprofRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofRequest.ProtoReflect.Descriptor instead. 
+func (*RawPprofRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{3} +} + +func (x *RawPprofRequest) GetDebug() int32 { + if x != nil { + return x.Debug + } + return 0 +} + +type RawPprofCpuRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofCpuRequest) Reset() { + *x = RawPprofCpuRequest{} + mi := &file_supernode_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawPprofCpuRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -var file_supernode_service_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, - 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, - 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 
0x61, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (*RawPprofCpuRequest) ProtoMessage() {} + +func (x *RawPprofCpuRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofCpuRequest.ProtoReflect.Descriptor instead. +func (*RawPprofCpuRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{4} +} + +func (x *RawPprofCpuRequest) GetSeconds() int32 { + if x != nil { + return x.Seconds + } + return 0 +} + +type RawPprofResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofResponse) Reset() { + *x = RawPprofResponse{} + mi := &file_supernode_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +func (x *RawPprofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawPprofResponse) ProtoMessage() {} + +func (x *RawPprofResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofResponse.ProtoReflect.Descriptor instead. 
+func (*RawPprofResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{5} +} + +func (x *RawPprofResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_supernode_service_proto protoreflect.FileDescriptor + +const file_supernode_service_proto_rawDesc = "" + + "\n" + + "\x17supernode/service.proto\x12\tsupernode\x1a\x16supernode/status.proto\x1a\x1cgoogle/api/annotations.proto\"\x15\n" + + "\x13ListServicesRequest\"`\n" + + "\x14ListServicesResponse\x122\n" + + "\bservices\x18\x01 \x03(\v2\x16.supernode.ServiceInfoR\bservices\x12\x14\n" + + "\x05count\x18\x02 \x01(\x05R\x05count\";\n" + + "\vServiceInfo\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\amethods\x18\x02 \x03(\tR\amethods\"'\n" + + "\x0fRawPprofRequest\x12\x14\n" + + "\x05debug\x18\x01 \x01(\x05R\x05debug\".\n" + + "\x12RawPprofCpuRequest\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x05R\aseconds\"&\n" + + "\x10RawPprofResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\xec\v\n" + + "\x10SupernodeService\x12X\n" + + "\tGetStatus\x12\x18.supernode.StatusRequest\x1a\x19.supernode.StatusResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/status\x12i\n" + + "\fListServices\x12\x1e.supernode.ListServicesRequest\x1a\x1f.supernode.ListServicesResponse\"\x18\x82\xd3\xe4\x93\x02\x12\x12\x10/api/v1/services\x12g\n" + + "\vGetRawPprof\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/api/v1/debug/raw/pprof\x12p\n" + + "\x0fGetRawPprofHeap\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/api/v1/debug/raw/pprof/heap\x12z\n" + + "\x14GetRawPprofGoroutine\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\")\x82\xd3\xe4\x93\x02#\x12!/api/v1/debug/raw/pprof/goroutine\x12t\n" + + 
"\x11GetRawPprofAllocs\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/allocs\x12r\n" + + "\x10GetRawPprofBlock\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/block\x12r\n" + + "\x10GetRawPprofMutex\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/mutex\x12\x80\x01\n" + + "\x17GetRawPprofThreadcreate\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\",\x82\xd3\xe4\x93\x02&\x12$/api/v1/debug/raw/pprof/threadcreate\x12y\n" + + "\x12GetRawPprofProfile\x12\x1d.supernode.RawPprofCpuRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/profile\x12v\n" + + "\x12GetRawPprofCmdline\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/cmdline\x12t\n" + + "\x11GetRawPprofSymbol\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/symbol\x12r\n" + + "\x10GetRawPprofTrace\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/traceB6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" + var ( file_supernode_service_proto_rawDescOnce sync.Once - file_supernode_service_proto_rawDescData = file_supernode_service_proto_rawDesc + file_supernode_service_proto_rawDescData []byte ) func file_supernode_service_proto_rawDescGZIP() []byte { file_supernode_service_proto_rawDescOnce.Do(func() { - file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_service_proto_rawDescData) + file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), 
len(file_supernode_service_proto_rawDesc))) }) return file_supernode_service_proto_rawDescData } -var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_supernode_service_proto_goTypes = []any{ (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse (*ServiceInfo)(nil), // 2: supernode.ServiceInfo - (*StatusRequest)(nil), // 3: supernode.StatusRequest - (*StatusResponse)(nil), // 4: supernode.StatusResponse + (*RawPprofRequest)(nil), // 3: supernode.RawPprofRequest + (*RawPprofCpuRequest)(nil), // 4: supernode.RawPprofCpuRequest + (*RawPprofResponse)(nil), // 5: supernode.RawPprofResponse + (*StatusRequest)(nil), // 6: supernode.StatusRequest + (*StatusResponse)(nil), // 7: supernode.StatusResponse } var file_supernode_service_proto_depIdxs = []int32{ - 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 3, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 3: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 1, // 4: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo + 6, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 3, // 3: supernode.SupernodeService.GetRawPprof:input_type -> 
supernode.RawPprofRequest + 3, // 4: supernode.SupernodeService.GetRawPprofHeap:input_type -> supernode.RawPprofRequest + 3, // 5: supernode.SupernodeService.GetRawPprofGoroutine:input_type -> supernode.RawPprofRequest + 3, // 6: supernode.SupernodeService.GetRawPprofAllocs:input_type -> supernode.RawPprofRequest + 3, // 7: supernode.SupernodeService.GetRawPprofBlock:input_type -> supernode.RawPprofRequest + 3, // 8: supernode.SupernodeService.GetRawPprofMutex:input_type -> supernode.RawPprofRequest + 3, // 9: supernode.SupernodeService.GetRawPprofThreadcreate:input_type -> supernode.RawPprofRequest + 4, // 10: supernode.SupernodeService.GetRawPprofProfile:input_type -> supernode.RawPprofCpuRequest + 3, // 11: supernode.SupernodeService.GetRawPprofCmdline:input_type -> supernode.RawPprofRequest + 3, // 12: supernode.SupernodeService.GetRawPprofSymbol:input_type -> supernode.RawPprofRequest + 3, // 13: supernode.SupernodeService.GetRawPprofTrace:input_type -> supernode.RawPprofRequest + 7, // 14: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 1, // 15: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 5, // 16: supernode.SupernodeService.GetRawPprof:output_type -> supernode.RawPprofResponse + 5, // 17: supernode.SupernodeService.GetRawPprofHeap:output_type -> supernode.RawPprofResponse + 5, // 18: supernode.SupernodeService.GetRawPprofGoroutine:output_type -> supernode.RawPprofResponse + 5, // 19: supernode.SupernodeService.GetRawPprofAllocs:output_type -> supernode.RawPprofResponse + 5, // 20: supernode.SupernodeService.GetRawPprofBlock:output_type -> supernode.RawPprofResponse + 5, // 21: supernode.SupernodeService.GetRawPprofMutex:output_type -> supernode.RawPprofResponse + 5, // 22: supernode.SupernodeService.GetRawPprofThreadcreate:output_type -> supernode.RawPprofResponse + 5, // 23: supernode.SupernodeService.GetRawPprofProfile:output_type -> supernode.RawPprofResponse + 5, // 24: 
supernode.SupernodeService.GetRawPprofCmdline:output_type -> supernode.RawPprofResponse + 5, // 25: supernode.SupernodeService.GetRawPprofSymbol:output_type -> supernode.RawPprofResponse + 5, // 26: supernode.SupernodeService.GetRawPprofTrace:output_type -> supernode.RawPprofResponse + 14, // [14:27] is the sub-list for method output_type + 1, // [1:14] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_supernode_service_proto_init() } @@ -246,9 +396,9 @@ func file_supernode_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, @@ -257,7 +407,6 @@ func file_supernode_service_proto_init() { MessageInfos: file_supernode_service_proto_msgTypes, }.Build() File_supernode_service_proto = out.File - file_supernode_service_proto_rawDesc = nil file_supernode_service_proto_goTypes = nil file_supernode_service_proto_depIdxs = nil } diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 326bccf3..93983b0f 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -13,15 +13,14 @@ import ( "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" 
"google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,7 +29,6 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage var _ = metadata.Join var ( @@ -87,55 +85,730 @@ func local_request_SupernodeService_ListServices_0(ctx context.Context, marshale } +var ( + filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprof(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + 
filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofHeap(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofAllocs(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, 
req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofBlock(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetRawPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofMutex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: 
map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofProfile(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata 
runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofCmdline(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + 
msg, err := client.GetRawPprofSymbol(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofSymbol(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofTrace(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, server, 
req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } + resp, md, err := local_request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } + resp, md, err := local_request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -145,21 +818,21 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser // RegisterSupernodeServiceHandlerFromEndpoint is same as RegisterSupernodeServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) 
if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -184,19 +857,21 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -204,19 +879,263 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_ListServices_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } + resp, md, err := request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -224,13 +1143,57 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser } var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + + pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + + 
pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) + + pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + + pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) ) var ( forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + + 
forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 08140033..523499b8 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -16,6 +16,359 @@ "application/json" ], "paths": { + "/api/v1/debug/raw/pprof": { + "get": { + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetRawPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/block": { + "get": { + "operationId": "SupernodeService_GetRawPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + 
} + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/cmdline": { + "get": { + "operationId": "SupernodeService_GetRawPprofCmdline", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetRawPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetRawPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + 
"required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetRawPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "CPU profile duration in seconds (default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/symbol": { + "get": { + "operationId": "SupernodeService_GetRawPprofSymbol", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + 
"/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, "/api/v1/services": { "get": { "operationId": "SupernodeService_ListServices", @@ -454,6 +807,16 @@ } } }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, "supernodeServiceInfo": { "type": "object", "properties": { diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go index acb2e4c9..42857bf2 100644 --- a/gen/supernode/service_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -19,8 +19,19 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" - 
SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" + SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetRawPprof_FullMethodName = "/supernode.SupernodeService/GetRawPprof" + SupernodeService_GetRawPprofHeap_FullMethodName = "/supernode.SupernodeService/GetRawPprofHeap" + SupernodeService_GetRawPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetRawPprofGoroutine" + SupernodeService_GetRawPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetRawPprofAllocs" + SupernodeService_GetRawPprofBlock_FullMethodName = "/supernode.SupernodeService/GetRawPprofBlock" + SupernodeService_GetRawPprofMutex_FullMethodName = "/supernode.SupernodeService/GetRawPprofMutex" + SupernodeService_GetRawPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetRawPprofThreadcreate" + SupernodeService_GetRawPprofProfile_FullMethodName = "/supernode.SupernodeService/GetRawPprofProfile" + SupernodeService_GetRawPprofCmdline_FullMethodName = "/supernode.SupernodeService/GetRawPprofCmdline" + SupernodeService_GetRawPprofSymbol_FullMethodName = "/supernode.SupernodeService/GetRawPprofSymbol" + SupernodeService_GetRawPprofTrace_FullMethodName = "/supernode.SupernodeService/GetRawPprofTrace" ) // SupernodeServiceClient is the client API for SupernodeService service. 
@@ -31,6 +42,18 @@ const ( type SupernodeServiceClient interface { GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) } type supernodeServiceClient struct { @@ -61,6 +84,116 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi return out, nil } +func (c *supernodeServiceClient) GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprof_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofHeap_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofGoroutine_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofAllocs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofBlock_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofMutex_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofThreadcreate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofCmdline_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofSymbol_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofTrace_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // SupernodeServiceServer is the server API for SupernodeService service. // All implementations must embed UnimplementedSupernodeServiceServer // for forward compatibility. @@ -69,6 +202,18 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi type SupernodeServiceServer interface { GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) + GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofTrace(context.Context, 
*RawPprofRequest) (*RawPprofResponse, error) mustEmbedUnimplementedSupernodeServiceServer() } @@ -85,6 +230,39 @@ func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusReq func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") } +func (UnimplementedSupernodeServiceServer) GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprof not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofHeap not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofGoroutine not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofAllocs not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofBlock not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofMutex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofThreadcreate not implemented") +} +func 
(UnimplementedSupernodeServiceServer) GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofProfile not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofCmdline not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofSymbol not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofTrace(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofTrace not implemented") +} func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} @@ -142,6 +320,204 @@ func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _SupernodeService_GetRawPprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprof(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofHeap_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofGoroutine_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofAllocs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_SupernodeService_GetRawPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofBlock_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofMutex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofThreadcreate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofCpuRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, req.(*RawPprofCpuRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofCmdline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofCmdline_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofSymbol_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
SupernodeService_GetRawPprofSymbol_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofTrace_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + // SupernodeService_ServiceDesc is the grpc.ServiceDesc for SupernodeService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -157,6 +533,50 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListServices", Handler: _SupernodeService_ListServices_Handler, }, + { + MethodName: "GetRawPprof", + Handler: _SupernodeService_GetRawPprof_Handler, + }, + { + MethodName: "GetRawPprofHeap", + Handler: _SupernodeService_GetRawPprofHeap_Handler, + }, + { + MethodName: "GetRawPprofGoroutine", + Handler: _SupernodeService_GetRawPprofGoroutine_Handler, + }, + { + MethodName: "GetRawPprofAllocs", + Handler: _SupernodeService_GetRawPprofAllocs_Handler, + }, + { + MethodName: "GetRawPprofBlock", + Handler: _SupernodeService_GetRawPprofBlock_Handler, + }, + { + MethodName: "GetRawPprofMutex", + Handler: _SupernodeService_GetRawPprofMutex_Handler, + }, + { + MethodName: "GetRawPprofThreadcreate", + Handler: _SupernodeService_GetRawPprofThreadcreate_Handler, + }, + { + MethodName: "GetRawPprofProfile", + Handler: _SupernodeService_GetRawPprofProfile_Handler, + }, + { + MethodName: "GetRawPprofCmdline", + Handler: _SupernodeService_GetRawPprofCmdline_Handler, + }, + { + MethodName: "GetRawPprofSymbol", + Handler: _SupernodeService_GetRawPprofSymbol_Handler, + }, + { + MethodName: "GetRawPprofTrace", + Handler: _SupernodeService_GetRawPprofTrace_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "supernode/service.proto", diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index 52045726..74e0d6d7 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -1,16 +1,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/status.proto package supernode import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -22,13 +24,12 @@ const ( // StatusRequest controls optional metrics in the status response type StatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Optional: include detailed P2P metrics in the response // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusRequest) Reset() { @@ -70,10 +71,7 @@ func (x *StatusRequest) GetIncludeP2PMetrics() bool { // The StatusResponse represents system status with clear organization type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` @@ -83,6 +81,8 @@ type StatusResponse struct { Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) 
IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse) Reset() { @@ -180,14 +180,13 @@ func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { // System resource information type StatusResponse_Resources struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources) Reset() { @@ -250,13 +249,12 @@ func (x *StatusResponse_Resources) GetHardwareSummary() string { // ServiceTasks contains task information for a specific service type StatusResponse_ServiceTasks struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` + TaskCount int32 
`protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` unknownFields protoimpl.UnknownFields - - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` - TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusResponse_ServiceTasks) Reset() { @@ -312,12 +310,11 @@ func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { // Network information type StatusResponse_Network struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network + PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) unknownFields protoimpl.UnknownFields - - PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network - PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Network) Reset() { @@ -366,16 +363,15 @@ func (x *StatusResponse_Network) GetPeerAddresses() []string { // P2P metrics and diagnostics (additive field) type StatusResponse_P2PMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DhtMetrics 
*StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics) Reset() { @@ -451,12 +447,11 @@ func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskSta } type StatusResponse_Resources_CPU struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" 
json:"usage_percent,omitempty"` // CPU usage percentage (0-100) + Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores unknownFields protoimpl.UnknownFields - - UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) - Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources_CPU) Reset() { @@ -504,14 +499,13 @@ func (x *StatusResponse_Resources_CPU) GetCores() int32 { } type StatusResponse_Resources_Memory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB + UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB + AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB + UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) unknownFields protoimpl.UnknownFields - - TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB - UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB - AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB - UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) + sizeCache protoimpl.SizeCache } func (x 
*StatusResponse_Resources_Memory) Reset() { @@ -573,15 +567,14 @@ func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { } type StatusResponse_Resources_Storage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored - TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` - UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` - AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` - UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored + TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources_Storage) Reset() { @@ -651,14 +644,13 @@ func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { // Rolling DHT metrics snapshot type StatusResponse_P2PMetrics_DhtMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields 
protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { @@ -721,14 +713,13 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { // Per-handler counters from network layer type StatusResponse_P2PMetrics_HandleCounters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` unknownFields protoimpl.UnknownFields - - Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + sizeCache 
protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { @@ -791,16 +782,15 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { // Ban list entry type StatusResponse_P2PMetrics_BanEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count - CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) - AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { @@ -877,12 +867,11 @@ func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { // DB stats type StatusResponse_P2PMetrics_DatabaseStats struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` - P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { @@ -931,13 +920,12 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { // Disk status type StatusResponse_P2PMetrics_DiskStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` unknownFields protoimpl.UnknownFields - - AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` - UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` - FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { @@ -992,14 +980,13 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { } type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { 
- state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted - Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs - SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { @@ -1061,16 +1048,15 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate( } type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count + FoundLocal int32 
`protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested - Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count - FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally - FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { @@ -1147,228 +1133,120 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs( var File_supernode_status_proto protoreflect.FileDescriptor -var file_supernode_status_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, - 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 
0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, - 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, - 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, - 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 
0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, - 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, - 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, - 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, - 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, - 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 
0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, - 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, - 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 
0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, - 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, - 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, - 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 
0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, - 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, - 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, - 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x2e, 0x53, 
0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, - 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, - 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, - 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, - 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, - 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, - 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, - 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, - 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, - 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, - 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 
0x18, 0x0a, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, - 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, - 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, - 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x11, 0x70, 0x32, 0x70, 
0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, - 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, - 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, - 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 
0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_supernode_status_proto_rawDesc = "" + + "\n" + + "\x16supernode/status.proto\x12\tsupernode\"?\n" + + "\rStatusRequest\x12.\n" + + "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\x84\x19\n" + + "\x0eStatusResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12%\n" + + "\x0euptime_seconds\x18\x02 \x01(\x04R\ruptimeSeconds\x12A\n" + + "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12K\n" + + "\rrunning_tasks\x18\x04 \x03(\v2&.supernode.StatusResponse.ServiceTasksR\frunningTasks\x12/\n" + + "\x13registered_services\x18\x05 \x03(\tR\x12registeredServices\x12;\n" + + "\anetwork\x18\x06 \x01(\v2!.supernode.StatusResponse.NetworkR\anetwork\x12\x12\n" + + "\x04rank\x18\a \x01(\x05R\x04rank\x12\x1d\n" + + "\n" + + "ip_address\x18\b \x01(\tR\tipAddress\x12E\n" + + "\vp2p_metrics\x18\t \x01(\v2$.supernode.StatusResponse.P2PMetricsR\n" + + "p2pMetrics\x1a\x82\x05\n" + + "\tResources\x129\n" + + "\x03cpu\x18\x01 \x01(\v2'.supernode.StatusResponse.Resources.CPUR\x03cpu\x12B\n" + + "\x06memory\x18\x02 \x01(\v2*.supernode.StatusResponse.Resources.MemoryR\x06memory\x12T\n" + + "\x0fstorage_volumes\x18\x03 \x03(\v2+.supernode.StatusResponse.Resources.StorageR\x0estorageVolumes\x12)\n" + + "\x10hardware_summary\x18\x04 \x01(\tR\x0fhardwareSummary\x1a@\n" + + "\x03CPU\x12#\n" + + "\rusage_percent\x18\x01 \x01(\x01R\fusagePercent\x12\x14\n" + + "\x05cores\x18\x02 \x01(\x05R\x05cores\x1a\x84\x01\n" + + "\x06Memory\x12\x19\n" + + "\btotal_gb\x18\x01 \x01(\x01R\atotalGb\x12\x17\n" + + "\aused_gb\x18\x02 \x01(\x01R\x06usedGb\x12!\n" + + "\favailable_gb\x18\x03 \x01(\x01R\vavailableGb\x12#\n" + + "\rusage_percent\x18\x04 \x01(\x01R\fusagePercent\x1a\xab\x01\n" + + "\aStorage\x12\x12\n" + + "\x04path\x18\x01 
\x01(\tR\x04path\x12\x1f\n" + + "\vtotal_bytes\x18\x02 \x01(\x04R\n" + + "totalBytes\x12\x1d\n" + + "\n" + + "used_bytes\x18\x03 \x01(\x04R\tusedBytes\x12'\n" + + "\x0favailable_bytes\x18\x04 \x01(\x04R\x0eavailableBytes\x12#\n" + + "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1ak\n" + + "\fServiceTasks\x12!\n" + + "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x19\n" + + "\btask_ids\x18\x02 \x03(\tR\ataskIds\x12\x1d\n" + + "\n" + + "task_count\x18\x03 \x01(\x05R\ttaskCount\x1aQ\n" + + "\aNetwork\x12\x1f\n" + + "\vpeers_count\x18\x01 \x01(\x05R\n" + + "peersCount\x12%\n" + + "\x0epeer_addresses\x18\x02 \x03(\tR\rpeerAddresses\x1a\xf3\x0e\n" + + "\n" + + "P2PMetrics\x12P\n" + + "\vdht_metrics\x18\x01 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DhtMetricsR\n" + + "dhtMetrics\x12t\n" + + "\x16network_handle_metrics\x18\x02 \x03(\v2>.supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntryR\x14networkHandleMetrics\x12e\n" + + "\x11conn_pool_metrics\x18\x03 \x03(\v29.supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntryR\x0fconnPoolMetrics\x12H\n" + + "\bban_list\x18\x04 \x03(\v2-.supernode.StatusResponse.P2PMetrics.BanEntryR\abanList\x12N\n" + + "\bdatabase\x18\x05 \x01(\v22.supernode.StatusResponse.P2PMetrics.DatabaseStatsR\bdatabase\x12C\n" + + "\x04disk\x18\x06 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DiskStatusR\x04disk\x1a\xc0\x05\n" + + "\n" + + "DhtMetrics\x12s\n" + + "\x14store_success_recent\x18\x01 \x03(\v2A.supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPointR\x12storeSuccessRecent\x12v\n" + + "\x15batch_retrieve_recent\x18\x02 \x03(\v2B.supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePointR\x13batchRetrieveRecent\x121\n" + + "\x15hot_path_banned_skips\x18\x03 \x01(\x03R\x12hotPathBannedSkips\x125\n" + + "\x17hot_path_ban_increments\x18\x04 \x01(\x03R\x14hotPathBanIncrements\x1a\x8f\x01\n" + + "\x11StoreSuccessPoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x1a\n" + + "\brequests\x18\x02 
\x01(\x05R\brequests\x12\x1e\n" + + "\n" + + "successful\x18\x03 \x01(\x05R\n" + + "successful\x12!\n" + + "\fsuccess_rate\x18\x04 \x01(\x01R\vsuccessRate\x1a\xc8\x01\n" + + "\x12BatchRetrievePoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x12\n" + + "\x04keys\x18\x02 \x01(\x05R\x04keys\x12\x1a\n" + + "\brequired\x18\x03 \x01(\x05R\brequired\x12\x1f\n" + + "\vfound_local\x18\x04 \x01(\x05R\n" + + "foundLocal\x12#\n" + + "\rfound_network\x18\x05 \x01(\x05R\ffoundNetwork\x12\x1f\n" + + "\vduration_ms\x18\x06 \x01(\x03R\n" + + "durationMs\x1at\n" + + "\x0eHandleCounters\x12\x14\n" + + "\x05total\x18\x01 \x01(\x03R\x05total\x12\x18\n" + + "\asuccess\x18\x02 \x01(\x03R\asuccess\x12\x18\n" + + "\afailure\x18\x03 \x01(\x03R\afailure\x12\x18\n" + + "\atimeout\x18\x04 \x01(\x03R\atimeout\x1a\x9d\x01\n" + + "\bBanEntry\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x0e\n" + + "\x02ip\x18\x02 \x01(\tR\x02ip\x12\x12\n" + + "\x04port\x18\x03 \x01(\rR\x04port\x12\x14\n" + + "\x05count\x18\x04 \x01(\x05R\x05count\x12&\n" + + "\x0fcreated_at_unix\x18\x05 \x01(\x03R\rcreatedAtUnix\x12\x1f\n" + + "\vage_seconds\x18\x06 \x01(\x03R\n" + + "ageSeconds\x1ae\n" + + "\rDatabaseStats\x12#\n" + + "\x0ep2p_db_size_mb\x18\x01 \x01(\x01R\vp2pDbSizeMb\x12/\n" + + "\x14p2p_db_records_count\x18\x02 \x01(\x03R\x11p2pDbRecordsCount\x1aU\n" + + "\n" + + "DiskStatus\x12\x15\n" + + "\x06all_mb\x18\x01 \x01(\x01R\x05allMb\x12\x17\n" + + "\aused_mb\x18\x02 \x01(\x01R\x06usedMb\x12\x17\n" + + "\afree_mb\x18\x03 \x01(\x01R\x06freeMb\x1a|\n" + + "\x19NetworkHandleMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + + "\x05value\x18\x02 \x01(\v23.supernode.StatusResponse.P2PMetrics.HandleCountersR\x05value:\x028\x01\x1aB\n" + + "\x14ConnPoolMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" var ( 
file_supernode_status_proto_rawDescOnce sync.Once - file_supernode_status_proto_rawDescData = file_supernode_status_proto_rawDesc + file_supernode_status_proto_rawDescData []byte ) func file_supernode_status_proto_rawDescGZIP() []byte { file_supernode_status_proto_rawDescOnce.Do(func() { - file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_status_proto_rawDescData) + file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc))) }) return file_supernode_status_proto_rawDescData } @@ -1427,7 +1305,7 @@ func file_supernode_status_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_status_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc)), NumEnums: 0, NumMessages: 18, NumExtensions: 0, @@ -1438,7 +1316,6 @@ func file_supernode_status_proto_init() { MessageInfos: file_supernode_status_proto_msgTypes, }.Build() File_supernode_status_proto = out.File - file_supernode_status_proto_rawDesc = nil file_supernode_status_proto_goTypes = nil file_supernode_status_proto_depIdxs = nil } diff --git a/go.mod b/go.mod index a581736e..4b484c0c 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.18.0 @@ -113,6 +113,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect diff --git a/go.sum b/go.sum index 839f29a2..ed0db1f6 100644 --- a/go.sum +++ b/go.sum @@ -415,6 +415,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 62aa2768..13615deb 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -17,6 +17,7 @@ import ( "github.com/btcsuite/btcutil/base58" "github.com/cenkalti/backoff/v4" "github.com/cosmos/cosmos-sdk/crypto/keyring" + "golang.org/x/sync/semaphore" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" "github.com/LumeraProtocol/supernode/v2/pkg/errors" @@ -498,9 +499,10 @@ func (s *DHT) newMessage(messageType int, receiver *Node, data interface{}) *Mes } sender := &Node{ - IP: hostIP, - ID: s.ht.self.ID, - Port: s.ht.self.Port, + IP: hostIP, + ID: s.ht.self.ID, + Port: s.ht.self.Port, + Version: localVersion(), } return &Message{ Sender: sender, @@ -687,11 +689,7 @@ func (s *DHT) BatchRetrieve(ctx 
context.Context, keys []string, required int32, var foundLocalCount int32 hexKeys := make([]string, len(keys)) - globalClosestContacts := make(map[string]*NodeList) hashes := make([][]byte, len(keys)) - knownNodes := make(map[string]*Node) - var knownMu sync.Mutex - var closestMu sync.RWMutex defer func() { resMap.Range(func(key, value interface{}) bool { @@ -715,15 +713,6 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, } }() - for _, key := range keys { - result[key] = nil - } - - supernodeAddr, _ := s.getSupernodeAddress(ctx) - hostIP := parseSupernodeAddress(supernodeAddr) - self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} - self.SetHashedID() - for i, key := range keys { decoded := base58.Decode(key) if len(decoded) != B/8 { @@ -731,16 +720,60 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, } hashes[i] = decoded hexKeys[i] = hex.EncodeToString(decoded) + result[key] = nil } + foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required) + if err != nil { + return nil, fmt.Errorf("fetch and add local keys: %v", err) + } + // Found locally count is logged via summary below; no external metrics + + if foundLocalCount >= required { + logtrace.Debug(ctx, "DHT BatchRetrieve satisfied from local storage", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, nil + } + + if len(localOnly) > 0 && localOnly[0] { + logtrace.Debug(ctx, "DHT BatchRetrieve local-only mode, insufficient keys", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, fmt.Errorf("local-only: found %d, required %d", foundLocalCount, required) + } + + supernodeAddr, addrErr := s.getSupernodeAddress(ctx) + if addrErr != nil { + logtrace.Warn(ctx, "Failed to get supernode address", logtrace.Fields{ + logtrace.FieldModule: "dht", + logtrace.FieldError: addrErr.Error(), + }) + } + hostIP := 
parseSupernodeAddress(supernodeAddr) + self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} + self.SetHashedID() + + knownNodes := make(map[string]*Node) + var knownMu sync.Mutex + for _, n := range s.ht.nodes() { nn := &Node{ID: n.ID, IP: n.IP, Port: n.Port} nn.SetHashedID() knownNodes[string(nn.ID)] = nn } + ignoreList := s.ignorelist.ToNodeList() + + globalClosestContacts := make(map[string]*NodeList) + var closestMu sync.RWMutex + for i := range keys { - top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], s.ignorelist.ToNodeList(), nil) + if _, found := resMap.Load(hexKeys[i]); found { + continue + } + + top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], ignoreList, nil) closestMu.Lock() globalClosestContacts[keys[i]] = top6 closestMu.Unlock() @@ -749,21 +782,12 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, delete(knownNodes, string(self.ID)) - foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required) - if err != nil { - return nil, fmt.Errorf("fetch and add local keys: %v", err) - } - // Found locally count is logged via summary below; no external metrics - if foundLocalCount >= required { - return result, nil - } - batchSize := batchRetrieveSize var networkFound int32 totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) + sem := semaphore.NewWeighted(int64(parallelBatches)) var wg sync.WaitGroup gctx, cancel := context.WithCancel(ctx) defer cancel() @@ -776,27 +800,39 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if end > len(keys) { end = len(keys) } + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { break } wg.Add(1) - semaphore <- struct{}{} - go s.processBatch( - gctx, - keys[start:end], - hexKeys[start:end], - semaphore, &wg, - 
globalClosestContacts, - &closestMu, - knownNodes, &knownMu, - &resMap, - required, - foundLocalCount, - &networkFound, - cancel, - txID, - ) + go func(start, end int) { + defer wg.Done() + + if err := sem.Acquire(gctx, 1); err != nil { + return + } + defer sem.Release(1) + + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { + return + } + + s.processBatch( + gctx, + keys[start:end], + hexKeys[start:end], + globalClosestContacts, + &closestMu, + knownNodes, &knownMu, + &resMap, + required, + foundLocalCount, + &networkFound, + cancel, + txID, + ) + }(start, end) } wg.Wait() @@ -820,8 +856,6 @@ func (s *DHT) processBatch( ctx context.Context, batchKeys []string, batchHexKeys []string, - semaphore chan struct{}, - wg *sync.WaitGroup, globalClosestContacts map[string]*NodeList, closestMu *sync.RWMutex, knownNodes map[string]*Node, @@ -833,94 +867,64 @@ func (s *DHT) processBatch( cancel context.CancelFunc, txID string, ) { - defer wg.Done() - defer func() { <-semaphore }() + select { + case <-ctx.Done(): + return + default: + } - for i := 0; i < maxIterations; i++ { - select { - case <-ctx.Done(): - return - default: - } + fetchMap := make(map[string][]int) - // Build fetch map (read globalClosestContacts under RLock) - fetchMap := make(map[string][]int) - for i, key := range batchKeys { - closestMu.RLock() - nl := globalClosestContacts[key] - closestMu.RUnlock() - if nl == nil { - continue - } - for _, node := range nl.Nodes { - nodeID := string(node.ID) - fetchMap[nodeID] = append(fetchMap[nodeID], i) - } - } + closestMu.RLock() + localContacts := make(map[string]*NodeList, len(batchKeys)) + for _, key := range batchKeys { + localContacts[key] = globalClosestContacts[key] + } + closestMu.RUnlock() - foundCount, newClosestContacts, batchErr := s.iterateBatchGetValues( - ctx, knownNodes, batchKeys, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), - ) - if batchErr != nil { - logtrace.Error(ctx, 
"Iterate batch get values failed", logtrace.Fields{ - logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), - }) + for idx, key := range batchKeys { + nl := localContacts[key] + if nl == nil { + continue } - - atomic.AddInt32(networkFound, int32(foundCount)) - if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { - cancel() - break + for _, node := range nl.Nodes { + nodeID := string(node.ID) + fetchMap[nodeID] = append(fetchMap[nodeID], idx) } + } - changed := false - for key, nodesList := range newClosestContacts { - if nodesList == nil || nodesList.Nodes == nil { - continue - } - - closestMu.RLock() - curr := globalClosestContacts[key] - closestMu.RUnlock() - if curr == nil || curr.Nodes == nil { - logtrace.Warn(ctx, "Global contacts missing key during merge", logtrace.Fields{"key": key}) - continue - } - - if !haveAllNodes(nodesList.Nodes, curr.Nodes) { - changed = true - } - - nodesList.AddNodes(curr.Nodes) - nodesList.Sort() - nodesList.TopN(Alpha) - - s.addKnownNodesSafe(ctx, nodesList.Nodes, knownNodes, knownMu) - - closestMu.Lock() - globalClosestContacts[key] = nodesList - closestMu.Unlock() - } + foundCount, batchErr := s.iterateBatchGetValues( + ctx, knownNodes, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), + ) + if batchErr != nil { + logtrace.Error(ctx, "Iterate batch get values failed", logtrace.Fields{ + logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), + }) + } - if !changed { - break - } + atomic.AddInt32(networkFound, int32(foundCount)) + if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { + cancel() } } -func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, keys []string, hexKeys []string, fetchMap map[string][]int, - resMap *sync.Map, req, alreadyFound int32) (int, map[string]*NodeList, error) { - semaphore := make(chan struct{}, storeSameSymbolsBatchConcurrency) // 
Limit concurrency to 1 - closestContacts := make(map[string]*NodeList) +func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, hexKeys []string, fetchMap map[string][]int, + resMap *sync.Map, req, alreadyFound int32) (int, error) { + sem := semaphore.NewWeighted(int64(storeSameSymbolsBatchConcurrency)) var wg sync.WaitGroup - contactsMap := make(map[string]map[string][]*Node) var firstErr error var mu sync.Mutex // To protect the firstErr foundCount := int32(0) gctx, cancel := context.WithCancel(ctx) // Create a cancellable context defer cancel() - for nodeID, node := range nodes { + + for nodeID := range fetchMap { + node, ok := nodes[nodeID] + if !ok { + continue + } + if s.ignorelist.Banned(node) { logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ logtrace.FieldModule: "dht", @@ -929,18 +933,17 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, continue } - contactsMap[nodeID] = make(map[string][]*Node) wg.Add(1) go func(node *Node, nodeID string) { defer wg.Done() - select { - case <-ctx.Done(): + if err := sem.Acquire(gctx, 1); err != nil { return - case <-gctx.Done(): + } + defer sem.Release(1) + + if atomic.LoadInt32(&foundCount) >= int32(req-alreadyFound) { return - case semaphore <- struct{}{}: - defer func() { <-semaphore }() } indices := fetchMap[nodeID] @@ -984,8 +987,6 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, break } } - } else { - contactsMap[nodeID][k] = v.Closest } } @@ -1008,33 +1009,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, }) } - for _, closestNodes := range contactsMap { - for key, nodes := range closestNodes { - comparator, err := hex.DecodeString(key) - if err != nil { - logtrace.Error(ctx, "Failed to decode hex key in closestNodes.Range", logtrace.Fields{ - logtrace.FieldModule: "dht", - "key": key, - logtrace.FieldError: err.Error(), - }) - return 0, nil, err - 
} - bkey := base58.Encode(comparator) - - if _, ok := closestContacts[bkey]; !ok { - closestContacts[bkey] = &NodeList{Nodes: nodes, Comparator: comparator} - } else { - closestContacts[bkey].AddNodes(nodes) - } - } - } - - for key, nodes := range closestContacts { - nodes.Sort() - nodes.TopN(Alpha) - closestContacts[key] = nodes - } - return int(foundCount), closestContacts, firstErr + return int(foundCount), firstErr } func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { @@ -1398,14 +1373,39 @@ func (s *DHT) sendStoreData(ctx context.Context, n *Node, request *StoreDataRequ // add a node into the appropriate k bucket, return the removed node if it's full func (s *DHT) addNode(ctx context.Context, node *Node) *Node { + // Minimum-version gating: reject nodes below configured minimum. + peerVer := "" + if node != nil { + peerVer = node.Version + } + if minRequired, tooOld := versionTooOld(peerVer); tooOld { + fields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "min_required": minRequired, + "peer_version": strings.TrimSpace(peerVer), + } + if node != nil { + fields["peer"] = node.String() + } + logtrace.Debug(ctx, "Rejecting node: peer below minimum version", fields) + return nil + } // Allow localhost for integration testing isIntegrationTest := os.Getenv("INTEGRATION_TEST") == "true" if node.IP == "" || node.IP == "0.0.0.0" || (!isIntegrationTest && node.IP == "127.0.0.1") { - logtrace.Debug(ctx, "Trying to add invalid node", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "Rejecting node: invalid IP", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "ip": node.IP, + "node": node.String(), + "integration_test": isIntegrationTest, + }) return nil } if bytes.Equal(node.ID, s.ht.self.ID) { - logtrace.Debug(ctx, "Trying to add itself", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "Rejecting node: is self", logtrace.Fields{ 
+ logtrace.FieldModule: "p2p", + "node": node.String(), + }) return nil } node.SetHashedID() diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index ef542ee5..a5ae39ee 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -28,10 +28,10 @@ import ( const ( defaultConnRate = 1000 - defaultMaxPayloadSize = 200 // MB + defaultMaxPayloadSize = 400 // MB errorBusy = "Busy" maxConcurrentFindBatchValsRequests = 25 - defaultExecTimeout = 10 * time.Second + defaultExecTimeout = 15 * time.Second ) // Global map for message type timeouts @@ -415,6 +415,20 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { } } + // Minimum-version gating: reject immediately if peer is below configured minimum + var senderVer string + if request != nil && request.Sender != nil { + senderVer = request.Sender.Version + } + if minRequired, tooOld := versionTooOld(senderVer); tooOld { + logtrace.Debug(ctx, "Rejecting connection: peer below minimum version", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "min_required": minRequired, + "peer_version": strings.TrimSpace(senderVer), + }) + return + } + reqID := uuid.New().String() mt := request.MessageType diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index b7a4baeb..51c495fd 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -23,6 +23,9 @@ type Node struct { // port of the node Port uint16 `json:"port,omitempty"` + // Version of the supernode binary (advertised to peers; may be used by min-version gating) + Version string `json:"version,omitempty"` + HashedID []byte } @@ -180,15 +183,13 @@ func (s *NodeList) DelNode(node *Node) { } func haveAllNodes(a, b []*Node) bool { + bSet := make(map[string]bool, len(b)) + for _, y := range b { + bSet[string(y.HashedID)] = true + } + for _, x := range a { - found := false - for _, y := range b { - if bytes.Equal(x.HashedID, y.HashedID) { - found = true - break - } - } - if !found { + if !bSet[string(x.HashedID)] { return false } } diff 
--git a/p2p/kademlia/node_activity.go b/p2p/kademlia/node_activity.go index 88e09f7a..f2f77e69 100644 --- a/p2p/kademlia/node_activity.go +++ b/p2p/kademlia/node_activity.go @@ -42,7 +42,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { var wg sync.WaitGroup for _, info := range repInfo { - info := info // capture + wg.Add(1) sem <- struct{}{} // acquire go func() { @@ -51,8 +51,8 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { node := s.makeNode([]byte(info.ID), info.IP, info.Port) - // Short per-ping timeout (fail fast) - if err := s.pingNode(ctx, node, 3*time.Second); err != nil { + // Per-ping timeout + if err := s.pingNode(ctx, node, 5*time.Second); err != nil { s.handlePingFailure(ctx, info.Active, node, err) return } @@ -76,8 +76,15 @@ func (s *DHT) pingNode(ctx context.Context, n *Node, timeout time.Duration) erro pctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() req := s.newMessage(Ping, n, nil) - _, err := s.network.Call(pctx, req, false) - return err + resp, err := s.network.Call(pctx, req, false) + if err != nil { + return err + } + // Capture remote version from response sender for later gating + if resp != nil && resp.Sender != nil { + n.Version = resp.Sender.Version + } + return nil } func (s *DHT) handlePingFailure(ctx context.Context, wasActive bool, n *Node, err error) { diff --git a/p2p/kademlia/replication.go b/p2p/kademlia/replication.go index 4a36c422..247f43b8 100644 --- a/p2p/kademlia/replication.go +++ b/p2p/kademlia/replication.go @@ -23,7 +23,7 @@ var ( nodeShowUpDeadline = time.Minute * 35 // check for active & inactive nodes after this interval - checkNodeActivityInterval = time.Minute * 2 + checkNodeActivityInterval = time.Minute * 5 defaultFetchAndStoreInterval = time.Minute * 10 diff --git a/p2p/kademlia/version_gate.go b/p2p/kademlia/version_gate.go new file mode 100644 index 00000000..d2d1a755 --- /dev/null +++ b/p2p/kademlia/version_gate.go @@ -0,0 +1,112 @@ +package kademlia + +import ( + 
"strconv" + "strings" +) + +// localVer is the advertised version of this binary (e.g., v1.2.3), +// injected by the caller (supernode/cmd) at startup. +var localVer string + +// minVer is the optional minimum peer version to accept. If empty, gating is disabled. +var minVer string + +// SetLocalVersion sets the version this node advertises to peers. +func SetLocalVersion(v string) { + localVer = strings.TrimSpace(v) +} + +// SetMinVersion sets the optional minimum required peer version for DHT interactions. +// When empty, version gating is disabled and all peers are accepted regardless of version string. +func SetMinVersion(v string) { + minVer = strings.TrimSpace(v) +} + +// localVersion returns the configured advertised version. +func localVersion() string { return localVer } + +// minimumVersion returns the configured minimum acceptable version; empty disables gating. +func minimumVersion() string { return minVer } + +// versionTooOld reports whether the peerVersion is below the configured minimum version. +// If no minimum is configured, gating is disabled and this returns ("", false). +func versionTooOld(peerVersion string) (minRequired string, tooOld bool) { + minRequired = minimumVersion() + if strings.TrimSpace(minRequired) == "" { + // Gating disabled + return "", false + } + + // Normalize inputs (strip leading 'v' and pre-release/build metadata) + p, okP := parseSemver(peerVersion) + m, okM := parseSemver(minRequired) + if !okM { + // Misconfigured minimum; disable gating to avoid accidental network splits. + return "", false + } + if !okP { + // Peer did not provide a valid version; treat as too old under a min-version policy. 
+ return minRequired, true + } + // Compare peer >= min + if p[0] < m[0] { + return minRequired, true + } + if p[0] > m[0] { + return minRequired, false + } + if p[1] < m[1] { + return minRequired, true + } + if p[1] > m[1] { + return minRequired, false + } + if p[2] < m[2] { + return minRequired, true + } + return minRequired, false +} + +// parseSemver parses versions like "v1.2.3", "1.2.3-alpha" into [major, minor, patch]. +// Returns ok=false if no numeric major part is found. +func parseSemver(v string) ([3]int, bool) { + var out [3]int + s := strings.TrimSpace(v) + if s == "" { + return out, false + } + if s[0] == 'v' || s[0] == 'V' { + s = s[1:] + } + // Drop pre-release/build metadata + if i := strings.IndexAny(s, "-+"); i >= 0 { + s = s[:i] + } + parts := strings.Split(s, ".") + if len(parts) == 0 { + return out, false + } + // Parse up to 3 numeric parts; missing parts default to 0 + for i := 0; i < len(parts) && i < 3; i++ { + numStr := parts[i] + // Trim non-digit suffixes (e.g., "1rc1" -> "1") + j := 0 + for j < len(numStr) && numStr[j] >= '0' && numStr[j] <= '9' { + j++ + } + if j == 0 { + // No leading digits + if i == 0 { + return out, false + } + break + } + n, err := strconv.Atoi(numStr[:j]) + if err != nil { + return out, false + } + out[i] = n + } + return out, true +} diff --git a/pkg/cascadekit/cascadekit_test.go b/pkg/cascadekit/cascadekit_test.go new file mode 100644 index 00000000..d3299705 --- /dev/null +++ b/pkg/cascadekit/cascadekit_test.go @@ -0,0 +1,66 @@ +package cascadekit + +import ( + "encoding/base64" + "testing" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/klauspost/compress/zstd" +) + +func TestExtractIndexAndCreatorSig_Strict(t *testing.T) { + // too few parts + if _, _, err := ExtractIndexAndCreatorSig("abc"); err == nil { + t.Fatalf("expected error for single segment") + } + // too many parts + if _, _, err := ExtractIndexAndCreatorSig("a.b.c"); err == nil { + t.Fatalf("expected error for three 
segments") + } + // exactly two parts + a, b, err := ExtractIndexAndCreatorSig("a.b") + if err != nil || a != "a" || b != "b" { + t.Fatalf("unexpected result: a=%q b=%q err=%v", a, b, err) + } +} + +func TestParseCompressedIndexFile_Strict(t *testing.T) { + idx := IndexFile{LayoutIDs: []string{"L1", "L2"}, LayoutSignature: base64.StdEncoding.EncodeToString([]byte("sig"))} + idxB64, err := EncodeIndexB64(idx) + if err != nil { + t.Fatalf("encode index: %v", err) + } + payload := []byte(idxB64 + "." + base64.StdEncoding.EncodeToString([]byte("sig2")) + ".0") + + enc, _ := zstd.NewWriter(nil) + defer enc.Close() + compressed := enc.EncodeAll(payload, nil) + + got, err := ParseCompressedIndexFile(compressed) + if err != nil { + t.Fatalf("parse compressed index: %v", err) + } + if got.LayoutSignature != idx.LayoutSignature || len(got.LayoutIDs) != 2 { + t.Fatalf("unexpected index decoded: %+v", got) + } + + // malformed: only two segments + compressedBad := enc.EncodeAll([]byte("a.b"), nil) + if _, err := ParseCompressedIndexFile(compressedBad); err == nil { + t.Fatalf("expected error for two segments") + } + // malformed: four segments + compressedBad4 := enc.EncodeAll([]byte("a.b.c.d"), nil) + if _, err := ParseCompressedIndexFile(compressedBad4); err == nil { + t.Fatalf("expected error for four segments") + } +} + +func TestVerifySingleBlock(t *testing.T) { + if err := VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}}}); err != nil { + t.Fatalf("unexpected error for single block: %v", err) + } + if err := VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}, {}}}); err == nil { + t.Fatalf("expected error for multi-block layout") + } +} diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go index 5fa61f7b..326ed87c 100644 --- a/pkg/cascadekit/doc.go +++ b/pkg/cascadekit/doc.go @@ -5,7 +5,7 @@ // Scope: // - Build and sign layout metadata (RaptorQ layout) and index files // - Generate redundant metadata files and index files + their IDs -// - Extract and 
decode index payloads from the on-chain signatures string +// - Extract and decode index payloads from the on-chain index signature format string // - Compute data hashes for request metadata // - Verify single-block layout consistency (explicit error if more than 1 block) // diff --git a/pkg/cascadekit/hash.go b/pkg/cascadekit/hash.go index 55288123..811f32cf 100644 --- a/pkg/cascadekit/hash.go +++ b/pkg/cascadekit/hash.go @@ -1,26 +1,15 @@ package cascadekit import ( - "bytes" "encoding/base64" - "io" - "lukechampine.com/blake3" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) -// ComputeBlake3Hash computes a 32-byte Blake3 hash of the given data. -func ComputeBlake3Hash(msg []byte) ([]byte, error) { - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { - return nil, err - } - return hasher.Sum(nil), nil -} - // ComputeBlake3DataHashB64 computes a Blake3 hash of the input and // returns it as a base64-encoded string. func ComputeBlake3DataHashB64(data []byte) (string, error) { - h, err := ComputeBlake3Hash(data) + h, err := utils.Blake3Hash(data) if err != nil { return "", err } diff --git a/pkg/cascadekit/ids.go b/pkg/cascadekit/ids.go index 5c2b404d..bd9540c9 100644 --- a/pkg/cascadekit/ids.go +++ b/pkg/cascadekit/ids.go @@ -2,96 +2,65 @@ package cascadekit import ( "bytes" - "fmt" "strconv" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/btcutil/base58" + "github.com/klauspost/compress/zstd" ) // GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs). -// The ID is base58(blake3(zstd(layout_b64.layout_sig_b64.counter))). 
-func GenerateLayoutIDs(layoutB64, layoutSigB64 string, ic, max uint32) []string { - layoutWithSig := fmt.Sprintf("%s.%s", layoutB64, layoutSigB64) - layoutIDs := make([]string, max) - - var buffer bytes.Buffer - buffer.Grow(len(layoutWithSig) + 10) - - for i := uint32(0); i < max; i++ { - buffer.Reset() - buffer.WriteString(layoutWithSig) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - - layoutIDs[i] = base58.Encode(hash) - } - - return layoutIDs +// The ID is base58(blake3(zstd(layout_signature_format.counter))). +// layoutSignatureFormat must be: base64(JSON(layout)).layout_signature_base64 +func GenerateLayoutIDs(layoutSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(layoutSignatureFormat), ic, max) } -// GenerateIndexIDs computes IDs for index files from the full signatures string. -func GenerateIndexIDs(signatures string, ic, max uint32) []string { - indexFileIDs := make([]string, max) - - var buffer bytes.Buffer - buffer.Grow(len(signatures) + 10) - - for i := uint32(0); i < max; i++ { - buffer.Reset() - buffer.WriteString(signatures) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - indexFileIDs[i] = base58.Encode(hash) - } - return indexFileIDs +// GenerateIndexIDs computes IDs for index files from the full index signature format string. +func GenerateIndexIDs(indexSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(indexSignatureFormat), ic, max) } // getIDFiles generates ID files by appending a '.' and counter, compressing, // and returning both IDs and compressed payloads. 
-func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { +// generateIDFiles builds compressed ID files from a base payload and returns +// both their content-addressed IDs and the compressed files themselves. +// For each counter in [ic..ic+max-1], the payload is: +// +// base + '.' + counter +// +// then zstd-compressed; the ID is base58(blake3(compressed)). +func generateIDFiles(base []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { idFiles := make([][]byte, 0, max) ids = make([]string, 0, max) var buffer bytes.Buffer + // Reuse a single zstd encoder across iterations + enc, zerr := zstd.NewWriter(nil) + if zerr != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", zerr) + } + defer enc.Close() + for i := uint32(0); i < max; i++ { buffer.Reset() counter := ic + i - buffer.Write(file) + buffer.Write(base) buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) + // Append counter efficiently without intermediate string + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(counter), 10) + buffer.Write(cnt) - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } + compressedData := enc.EncodeAll(buffer.Bytes(), nil) idFiles = append(idFiles, compressedData) hash, err := utils.Blake3Hash(compressedData) if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + return ids, idFiles, errors.Errorf("blake3 hash error getting an id file: %w", err) } ids = append(ids, base58.Encode(hash)) @@ -99,3 +68,36 @@ func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byt return ids, idFiles, nil } + +// generateIDs computes base58(blake3(zstd(base + '.' + counter))) for counters ic..ic+max-1. +// It reuses a single zstd encoder and avoids per-iteration heap churn. 
+func generateIDs(base []byte, ic, max uint32) ([]string, error) { + ids := make([]string, max) + + var buffer bytes.Buffer + // Reserve base length + dot + up to 10 digits + buffer.Grow(len(base) + 12) + + enc, err := zstd.NewWriter(nil) + if err != nil { + return nil, errors.Errorf("zstd encoder init: %w", err) + } + defer enc.Close() + + for i := uint32(0); i < max; i++ { + buffer.Reset() + buffer.Write(base) + buffer.WriteByte(SeparatorByte) + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(ic+i), 10) + buffer.Write(cnt) + + compressed := enc.EncodeAll(buffer.Bytes(), nil) + h, err := utils.Blake3Hash(compressed) + if err != nil { + return nil, errors.Errorf("blake3 hash (i=%d): %w", i, err) + } + ids[i] = base58.Encode(h) + } + return ids, nil +} diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go index e0cb3dce..456b365f 100644 --- a/pkg/cascadekit/index.go +++ b/pkg/cascadekit/index.go @@ -24,13 +24,13 @@ func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} } -// EncodeIndexB64 marshals an index file and returns both the raw JSON and base64. -func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) { - raw, err = json.Marshal(idx) +// EncodeIndexB64 marshals an index file and returns its base64-encoded JSON. +func EncodeIndexB64(idx IndexFile) (string, error) { + raw, err := json.Marshal(idx) if err != nil { - return "", nil, errors.Errorf("marshal index file: %w", err) + return "", errors.Errorf("marshal index file: %w", err) } - return base64.StdEncoding.EncodeToString(raw), raw, nil + return base64.StdEncoding.EncodeToString(raw), nil } // DecodeIndexB64 decodes base64(JSON(IndexFile)). 
@@ -46,17 +46,12 @@ func DecodeIndexB64(data string) (IndexFile, error) { return indexFile, nil } -// ExtractIndexAndCreatorSig splits a signatures string formatted as: +// ExtractIndexAndCreatorSig splits a signature-format string formatted as: // Base64(index_json).Base64(creator_signature) -func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) { - parts := strings.Split(signatures, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") +func ExtractIndexAndCreatorSig(indexSignatureFormat string) (indexB64 string, creatorSigB64 string, err error) { + parts := strings.Split(indexSignatureFormat, ".") + if len(parts) != 2 { + return "", "", errors.New("invalid index signature format: expected 2 segments (index_b64.creator_sig_b64)") } return parts[0], parts[1], nil } - -// MakeSignatureFormat composes the final signatures string. -func MakeSignatureFormat(indexB64, creatorSigB64 string) string { - return indexB64 + "." 
+ creatorSigB64 -} diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go index 0fbf3dca..342728d6 100644 --- a/pkg/cascadekit/index_parse.go +++ b/pkg/cascadekit/index_parse.go @@ -15,8 +15,8 @@ func ParseCompressedIndexFile(data []byte) (IndexFile, error) { return IndexFile{}, errors.Errorf("decompress index file: %w", err) } parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") + if len(parts) != 3 { + return IndexFile{}, errors.New("invalid index file format: expected 3 parts (index_b64.creator_sig_b64.counter)") } return DecodeIndexB64(string(parts[0])) } diff --git a/pkg/cascadekit/keyring_signatures.go b/pkg/cascadekit/keyring_signatures.go new file mode 100644 index 00000000..968af4b5 --- /dev/null +++ b/pkg/cascadekit/keyring_signatures.go @@ -0,0 +1,14 @@ +package cascadekit + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. +// These helpers centralize keyring-backed signing for clarity. +func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { + signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } + return CreateSignatures(layout, signer, ic, max) +} diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go index 534ef793..a77ddfd4 100644 --- a/pkg/cascadekit/metadata.go +++ b/pkg/cascadekit/metadata.go @@ -6,12 +6,12 @@ import ( // NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. // The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
-func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata { +func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool) actiontypes.CascadeMetadata { return actiontypes.CascadeMetadata{ DataHash: dataHashB64, FileName: fileName, RqIdsIc: rqIdsIc, - Signatures: signatures, + Signatures: indexSignatureFormat, Public: public, } } diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go index be950e4f..eb90dde0 100644 --- a/pkg/cascadekit/parsers.go +++ b/pkg/cascadekit/parsers.go @@ -2,11 +2,11 @@ package cascadekit import ( "bytes" + "encoding/json" "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/utils" - json "github.com/json-iterator/go" ) // ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. diff --git a/pkg/cascadekit/highlevel.go b/pkg/cascadekit/request_builder.go similarity index 53% rename from pkg/cascadekit/highlevel.go rename to pkg/cascadekit/request_builder.go index 16c0072d..695e2fdf 100644 --- a/pkg/cascadekit/highlevel.go +++ b/pkg/cascadekit/request_builder.go @@ -3,28 +3,21 @@ package cascadekit import ( actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/codec" - keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" ) -// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. 
-func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { - signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } - return CreateSignatures(layout, signer, ic, max) -} - // BuildCascadeRequest builds a Cascade request metadata from layout and file bytes. -// It computes blake3(data) base64, creates the signatures string and index IDs, +// It computes blake3(data) base64, creates the index signature format and index IDs, // and returns a CascadeMetadata ready for RequestAction. func BuildCascadeRequest(layout codec.Layout, fileBytes []byte, fileName string, kr cosmoskeyring.Keyring, keyName string, ic, max uint32, public bool) (actiontypes.CascadeMetadata, []string, error) { dataHashB64, err := ComputeBlake3DataHashB64(fileBytes) if err != nil { return actiontypes.CascadeMetadata{}, nil, err } - signatures, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) + indexSignatureFormat, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, nil, err } - meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) return meta, indexIDs, nil } diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go index 3a05eb94..8f6a85aa 100644 --- a/pkg/cascadekit/rqid.go +++ b/pkg/cascadekit/rqid.go @@ -1,63 +1,27 @@ package cascadekit import ( - "context" - "encoding/json" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) -// GenRQIdentifiersFilesResponse groups the generated files and their IDs. 
-type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files generated from the Metadata file - RedundantMetadataFiles [][]byte -} - -// GenerateLayoutFiles builds redundant metadata files from layout and signature. +// GenerateLayoutFilesFromB64 builds redundant metadata files using a precomputed +// base64(JSON(layout)) and the layout signature, avoiding an extra JSON marshal. // The content is: base64(JSON(layout)).layout_signature -func GenerateLayoutFiles(ctx context.Context, layout codec.Layout, layoutSigB64 string, ic uint32, max uint32) (GenRQIdentifiersFilesResponse, error) { - // Validate single-block to match package invariant - if len(layout.Blocks) != 1 { - return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") - } - - metadataFile, err := jsonMarshal(layout) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) - } - b64Encoded := utils.B64Encode(metadataFile) - - // Compose: base64(JSON(layout)).layout_signature - enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) - enc = append(enc, b64Encoded...) +func GenerateLayoutFilesFromB64(layoutB64 []byte, layoutSigB64 string, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + enc := make([]byte, 0, len(layoutB64)+1+len(layoutSigB64)) + enc = append(enc, layoutB64...) enc = append(enc, SeparatorByte) enc = append(enc, []byte(layoutSigB64)...) - - ids, files, err := getIDFiles(enc, ic, max) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: files, - RQIDs: ids, - }, nil + return generateIDFiles(enc, ic, max) } -// GenerateIndexFiles generates index files and their IDs from the full signatures format. 
-func GenerateIndexFiles(ctx context.Context, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Use the full signatures format that matches what was sent during RequestAction +// GenerateIndexFiles generates index files and their IDs from the full index signature format. +func GenerateIndexFiles(indexSignatureFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { + // Use the full index signature format that matches what was sent during RequestAction // The chain expects this exact format for ID generation - indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) + indexIDs, indexFiles, err = generateIDFiles([]byte(indexSignatureFormat), ic, max) if err != nil { return nil, nil, errors.Errorf("get index ID files: %w", err) } return indexIDs, indexFiles, nil } - -// jsonMarshal marshals a value to JSON. -func jsonMarshal(v interface{}) ([]byte, error) { return json.Marshal(v) } diff --git a/pkg/cascadekit/serialize.go b/pkg/cascadekit/serialize.go new file mode 100644 index 00000000..21cef3d9 --- /dev/null +++ b/pkg/cascadekit/serialize.go @@ -0,0 +1,29 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// LayoutJSON marshals a codec.Layout using the standard library encoder. +func LayoutJSON(layout codec.Layout) ([]byte, error) { + b, err := json.Marshal(layout) + if err != nil { + return nil, errors.Errorf("marshal layout: %w", err) + } + return b, nil +} + +// LayoutB64 returns base64(JSON(layout)) bytes using encoding/json for deterministic output. 
+func LayoutB64(layout codec.Layout) ([]byte, error) { + raw, err := LayoutJSON(layout) + if err != nil { + return nil, err + } + out := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + base64.StdEncoding.Encode(out, raw) + return out, nil +} diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go index 0c71e492..b8a02da9 100644 --- a/pkg/cascadekit/signatures.go +++ b/pkg/cascadekit/signatures.go @@ -33,35 +33,53 @@ func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layout return layoutB64, layoutSigB64, nil } -// CreateSignatures reproduces the cascade signature format and index IDs: +// SignIndexB64 marshals the index to JSON, base64-encodes it, and signs the +// base64 payload, returning both the index base64 and creator-signature base64. +func SignIndexB64(idx IndexFile, signer Signer) (indexB64 string, creatorSigB64 string, err error) { + raw, err := json.Marshal(idx) + if err != nil { + return "", "", errors.Errorf("marshal index file: %w", err) + } + indexB64 = base64.StdEncoding.EncodeToString(raw) + + sig, err := signer([]byte(indexB64)) + if err != nil { + return "", "", errors.Errorf("sign index: %w", err) + } + creatorSigB64 = base64.StdEncoding.EncodeToString(sig) + return indexB64, creatorSigB64, nil +} + +// CreateSignatures produces the index signature format and index IDs: // // Base64(index_json).Base64(creator_signature) // // It validates the layout has exactly one block. 
-func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (signatures string, indexIDs []string, err error) { +func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (indexSignatureFormat string, indexIDs []string, err error) { layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) if err != nil { return "", nil, err } // Generate layout IDs (not returned; used to populate the index file) - layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) + layoutSignatureFormat := layoutB64 + "." + layoutSigB64 + layoutIDs, err := GenerateLayoutIDs(layoutSignatureFormat, ic, max) + if err != nil { + return "", nil, err + } // Build and sign the index file idx := BuildIndex(layoutIDs, layoutSigB64) - indexB64, _, err := EncodeIndexB64(idx) + indexB64, creatorSigB64, err := SignIndexB64(idx, signer) if err != nil { return "", nil, err } + indexSignatureFormat = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - creatorSig, err := signer([]byte(indexB64)) + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs, err = GenerateIndexIDs(indexSignatureFormat, ic, max) if err != nil { - return "", nil, errors.Errorf("sign index: %w", err) + return "", nil, err } - creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) - signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - - // Generate the index IDs (these are the RQIDs sent to chain) - indexIDs = GenerateIndexIDs(signatures, ic, max) - return signatures, indexIDs, nil + return indexSignatureFormat, indexIDs, nil } diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go index 5c4ff8a4..74331dde 100644 --- a/pkg/cascadekit/verify.go +++ b/pkg/cascadekit/verify.go @@ -20,3 +20,11 @@ func VerifySingleBlockIDs(ticket, local codec.Layout) error { } return nil } + +// VerifySingleBlock checks that a layout contains exactly one block. 
+func VerifySingleBlock(layout codec.Layout) error { + if len(layout.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + return nil +} diff --git a/pkg/codec/codec.go b/pkg/codec/codec.go index cd751a79..73c31a2a 100644 --- a/pkg/codec/codec.go +++ b/pkg/codec/codec.go @@ -4,9 +4,10 @@ import ( "context" ) -// EncodeResponse represents the response of the encode request. +// EncodeResponse represents the response of the encode request. +// Layout contains the single-block layout produced by the encoder. type EncodeResponse struct { - Metadata Layout + Layout Layout SymbolsDir string } @@ -17,7 +18,7 @@ type Layout struct { // Block is the schema for each entry in the “blocks” array. type Block struct { BlockID int `json:"block_id"` - EncoderParameters []int `json:"encoder_parameters"` + EncoderParameters []uint8 `json:"encoder_parameters"` OriginalOffset int64 `json:"original_offset"` Size int64 `json:"size"` Symbols []string `json:"symbols"` @@ -30,13 +31,20 @@ type EncodeRequest struct { Path string DataSize int } +type CreateMetadataRequest struct { + Path string +} + +// CreateMetadataResponse returns the Layout. +type CreateMetadataResponse struct { + Layout Layout +} // RaptorQ contains methods for request services from RaptorQ service. type Codec interface { // Encode a file Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) - // CreateMetadata builds the single-block layout metadata for the given file // without generating RaptorQ symbols. 
- CreateMetadata(ctx context.Context, path string) (Layout, error) + CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) } diff --git a/pkg/codec/codec_default_test.go b/pkg/codec/codec_default_test.go index 537a8d7d..79b97bd1 100644 --- a/pkg/codec/codec_default_test.go +++ b/pkg/codec/codec_default_test.go @@ -34,7 +34,7 @@ func TestEncode_ToDirA(t *testing.T) { t.Logf("encoded to: %s", resp.SymbolsDir) // Log theoretical minimum percentage of symbols needed per block - for _, b := range resp.Metadata.Blocks { + for _, b := range resp.Layout.Blocks { s := int64(rqSymbolSize) if s <= 0 { s = 65535 @@ -120,3 +120,36 @@ func itoa(i int) string { } return string(b[n:]) } + +// TestCreateMetadata_SaveToFile generates layout metadata only and writes it to a file. +func TestCreateMetadata_SaveToFile(t *testing.T) { + if InputPath == "" { + t.Skip("set InputPath constant to a file path to run this test") + } + + ctx := context.TODO() + c := NewRaptorQCodec(BaseDir) + + // Create metadata using the codec and write it next to the input file. 
+ resp, err := c.CreateMetadata(ctx, CreateMetadataRequest{Path: InputPath}) + if err != nil { + t.Fatalf("create metadata: %v", err) + } + data, err := json.MarshalIndent(resp.Layout, "", " ") + if err != nil { + t.Fatalf("marshal metadata: %v", err) + } + outPath := InputPath + ".layout.json" + if err := os.WriteFile(outPath, data, 0o644); err != nil { + t.Fatalf("write output: %v", err) + } + + fi, err := os.Stat(outPath) + if err != nil { + t.Fatalf("stat output: %v", err) + } + if fi.Size() == 0 { + t.Fatalf("output file is empty: %s", outPath) + } + t.Logf("metadata saved to: %s (%d bytes)", outPath, fi.Size()) +} diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index 348894e4..251f92c4 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -49,6 +49,7 @@ func (rq *raptorQ) PrepareDecode( logtrace.FieldModule: "rq", logtrace.FieldActionID: actionID, } + logtrace.Info(ctx, "rq: prepare-decode start", fields) // Create root symbols dir for this action symbolsDir := filepath.Join(rq.symbolsBaseDir, actionID) @@ -145,10 +146,7 @@ func (rq *raptorQ) PrepareDecode( return os.RemoveAll(symbolsDir) } - logtrace.Debug(ctx, "prepare decode workspace created", logtrace.Fields{ - "symbols_dir": symbolsDir, - "blocks": len(blockDirs), - }) + logtrace.Info(ctx, "rq: prepare-decode ok", logtrace.Fields{"symbols_dir": symbolsDir, "blocks": len(blockDirs)}) return blockDirs, Write, Cleanup, ws, nil } @@ -164,7 +162,7 @@ func (rq *raptorQ) DecodeFromPrepared( logtrace.FieldModule: "rq", logtrace.FieldActionID: ws.ActionID, } - logtrace.Debug(ctx, "RaptorQ decode (prepared) requested", fields) + logtrace.Info(ctx, "rq: decode-from-prepared start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { @@ -173,9 +171,39 @@ func (rq *raptorQ) DecodeFromPrepared( } defer processor.Free() - // Write layout.json (idempotent) + // Write layout.json (idempotent). 
Important: encoder_parameters must be a JSON array, not base64 string. + // Go's encoding/json marshals []byte (aka []uint8) as base64 strings, which rq-go rejects. + // Use a wire struct that maps encoder_parameters to []int to produce a numeric array. + type blockOnDisk struct { + BlockID int `json:"block_id"` + EncoderParameters []int `json:"encoder_parameters"` + OriginalOffset int64 `json:"original_offset"` + Size int64 `json:"size"` + Symbols []string `json:"symbols"` + Hash string `json:"hash"` + } + type layoutOnDisk struct { + Blocks []blockOnDisk `json:"blocks"` + } + var lod layoutOnDisk + lod.Blocks = make([]blockOnDisk, len(layout.Blocks)) + for i, b := range layout.Blocks { + // convert []uint8 (aka []byte) to []int so JSON encodes as numeric array + ep := make([]int, len(b.EncoderParameters)) + for j := range b.EncoderParameters { + ep[j] = int(b.EncoderParameters[j]) + } + lod.Blocks[i] = blockOnDisk{ + BlockID: b.BlockID, + EncoderParameters: ep, + OriginalOffset: b.OriginalOffset, + Size: b.Size, + Symbols: b.Symbols, + Hash: b.Hash, + } + } layoutPath := filepath.Join(ws.SymbolsDir, "layout.json") - layoutBytes, err := json.Marshal(layout) + layoutBytes, err := json.Marshal(lod) if err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err) @@ -184,7 +212,7 @@ func (rq *raptorQ) DecodeFromPrepared( fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("write layout file: %w", err) } - logtrace.Debug(ctx, "layout.json written (prepared)", fields) + logtrace.Info(ctx, "rq: layout written", fields) // Decode to output (idempotent-safe: overwrite on success) outputPath := filepath.Join(ws.SymbolsDir, "output") @@ -194,9 +222,7 @@ func (rq *raptorQ) DecodeFromPrepared( return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err) } - logtrace.Debug(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ - "output_path": outputPath, - }) + 
logtrace.Info(ctx, "rq: decode-from-prepared ok", logtrace.Fields{"output_path": outputPath}) return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil } @@ -206,7 +232,7 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons logtrace.FieldModule: "rq", logtrace.FieldActionID: req.ActionID, } - logtrace.Debug(ctx, "RaptorQ decode request received", fields) + logtrace.Info(ctx, "rq: decode request", fields) // 1) Validate layout (the check) if len(req.Layout.Blocks) == 0 { @@ -243,7 +269,7 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons return DecodeResponse{}, werr } } - logtrace.Debug(ctx, "symbols persisted via Write()", fields) + logtrace.Info(ctx, "rq: symbols persisted", logtrace.Fields{"count": len(req.Symbols)}) } // 4) Decode using the prepared workspace (functionality) @@ -253,5 +279,6 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons return DecodeResponse{}, derr } success = true + logtrace.Info(ctx, "rq: decode ok", fields) return resp, nil } diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 14bad1d9..487f92d8 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -15,7 +15,7 @@ const ( rqSymbolSize uint16 = 65535 rqRedundancyFactor uint8 = 6 // Limit RaptorQ processor memory usage to ~2 GiB - rqMaxMemoryMB uint64 = 4 * 1024 // MB + rqMaxMemoryMB uint64 = 8 * 1024 // MB // Concurrency tuned for 2 GiB limit and typical 8+ core CPUs rqConcurrency uint64 = 1 // Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB) @@ -43,6 +43,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons "data-size": req.DataSize, } + logtrace.Info(ctx, "rq: encode start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { return EncodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) @@ 
-77,34 +78,35 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons } var encodeResp EncodeResponse - if err := json.Unmarshal(layoutData, &encodeResp.Metadata); err != nil { + if err := json.Unmarshal(layoutData, &encodeResp.Layout); err != nil { return EncodeResponse{}, fmt.Errorf("unmarshal layout: %w", err) } encodeResp.SymbolsDir = symbolsDir // Enforce single-block output; abort if multiple blocks are produced - if n := len(encodeResp.Metadata.Blocks); n != 1 { + if n := len(encodeResp.Layout.Blocks); n != 1 { return EncodeResponse{}, fmt.Errorf("raptorq encode produced %d blocks; single-block layout is required", n) } - + logtrace.Info(ctx, "rq: encode ok", logtrace.Fields{"symbols_dir": encodeResp.SymbolsDir}) return encodeResp, nil } // CreateMetadata builds only the layout metadata for the given file without generating symbols. -func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, error) { +func (rq *raptorQ) CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) { // Populate fields; include data-size by stat-ing the file to preserve existing log fields fields := logtrace.Fields{ logtrace.FieldMethod: "CreateMetadata", logtrace.FieldModule: "rq", - "path": path, + "path": req.Path, } - if fi, err := os.Stat(path); err == nil { + if fi, err := os.Stat(req.Path); err == nil { fields["data-size"] = int(fi.Size()) } + logtrace.Info(ctx, "rq: create-metadata start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { - return Layout{}, fmt.Errorf("create RaptorQ processor: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) } defer processor.Free() logtrace.Debug(ctx, "RaptorQ processor created", fields) @@ -120,33 +122,33 @@ func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, err tmpDir, err := os.MkdirTemp(base, 
"rq_meta_*") if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("mkdir temp dir: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("mkdir temp dir: %w", err) } defer os.RemoveAll(tmpDir) layoutPath := filepath.Join(tmpDir, "layout.json") // Use rq-go's metadata-only creation; no symbols are produced here. - resp, err := processor.CreateMetadata(path, layoutPath, blockSize) + resp, err := processor.CreateMetadata(req.Path, layoutPath, blockSize) if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("raptorq create metadata: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("raptorq create metadata: %w", err) } layoutData, err := os.ReadFile(resp.LayoutFilePath) if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) + return CreateMetadataResponse{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) } var layout Layout if err := json.Unmarshal(layoutData, &layout); err != nil { - return Layout{}, fmt.Errorf("unmarshal layout: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("unmarshal layout: %w", err) } // Enforce single-block output; abort if multiple blocks are produced if n := len(layout.Blocks); n != 1 { - return Layout{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) + return CreateMetadataResponse{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) } - - return layout, nil + logtrace.Info(ctx, "rq: create-metadata ok", logtrace.Fields{"blocks": len(layout.Blocks)}) + return CreateMetadataResponse{Layout: layout}, nil } diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go deleted file mode 100644 index 00f8c512..00000000 --- a/pkg/common/blocktracker/block_tracker.go +++ /dev/null @@ -1,121 +0,0 @@ -package blocktracker - -import ( - "context" - "sync" - "time" - - 
"github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -const ( - defaultRetries = 3 - defaultDelayDurationBetweenRetries = 5 * time.Second - defaultRPCConnectTimeout = 15 * time.Second - // Update duration in case last update was success - defaultSuccessUpdateDuration = 10 * time.Second - // Update duration in case last update was failed - prevent too much call to Lumera - defaultFailedUpdateDuration = 5 * time.Second - defaultNextBlockTimeout = 30 * time.Minute -) - -// LumeraClient defines interface functions BlockCntTracker expects from Lumera -type LumeraClient interface { - // GetBlockCount returns block height of blockchain - GetBlockCount(ctx context.Context) (int32, error) -} - -// BlockCntTracker defines a block tracker - that will keep current block height -type BlockCntTracker struct { - mtx sync.Mutex - LumeraClient LumeraClient - curBlockCnt int32 - lastSuccess time.Time - lastRetried time.Time - lastErr error - delayBetweenRetries time.Duration - retries int -} - -// New returns an instance of BlockCntTracker -func New(LumeraClient LumeraClient) *BlockCntTracker { - return &BlockCntTracker{ - LumeraClient: LumeraClient, - curBlockCnt: 0, - delayBetweenRetries: defaultDelayDurationBetweenRetries, - retries: defaultRetries, - } -} - -func (tracker *BlockCntTracker) refreshBlockCount(retries int) { - tracker.lastRetried = time.Now().UTC() - for i := 0; i < retries; i = i + 1 { - ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout) - blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx) - if err == nil { - tracker.curBlockCnt = blockCnt - tracker.lastSuccess = time.Now().UTC() - cancel() - tracker.lastErr = nil - return - } - cancel() - - tracker.lastErr = err - // delay between retries - time.Sleep(tracker.delayBetweenRetries) - } - -} - -// GetBlockCount return current block count -// it will get from cache if last refresh is small than defaultSuccessUpdateDuration -// or will refresh it by call from Lumera daemon to 
get the latest one if defaultSuccessUpdateDuration expired -func (tracker *BlockCntTracker) GetBlockCount() (int32, error) { - tracker.mtx.Lock() - defer tracker.mtx.Unlock() - - shouldRefresh := false - - if tracker.lastSuccess.After(tracker.lastRetried) { - if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) { - shouldRefresh = true - } - } else { - // prevent update too much - if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) { - shouldRefresh = true - } - } - - if shouldRefresh { - tracker.refreshBlockCount(tracker.retries) - } - - if tracker.curBlockCnt == 0 { - return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr) - } - - return tracker.curBlockCnt, nil -} - -// WaitTillNextBlock will wait until next block height is greater than blockCnt -func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error { - for { - select { - case <-ctx.Done(): - return errors.Errorf("context done: %w", ctx.Err()) - case <-time.After(defaultNextBlockTimeout): - return errors.Errorf("timeout waiting for next block") - case <-time.After(defaultSuccessUpdateDuration): - curBlockCnt, err := tracker.GetBlockCount() - if err != nil { - return errors.Errorf("failed to get blockcount: %w", err) - } - - if curBlockCnt > blockCnt { - return nil - } - } - } -} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go deleted file mode 100644 index b070a4b7..00000000 --- a/pkg/common/blocktracker/block_tracker_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package blocktracker - -import ( - "context" - "errors" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type fakePastelClient struct { - retBlockCnt int32 - retErr error -} - -func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { - return fake.retBlockCnt, fake.retErr -} - -func TestGetCountFirstTime(t *testing.T) { - tests := 
[]struct { - name string - pastelClient *fakePastelClient - expectErr bool - }{ - { - name: "success", - pastelClient: &fakePastelClient{ - retBlockCnt: 10, - retErr: nil, - }, - expectErr: false, - }, - { - name: "fail", - pastelClient: &fakePastelClient{ - retBlockCnt: 0, - retErr: errors.New("error"), - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tracker := New(tt.pastelClient) - tracker.retries = 1 - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) - if tt.expectErr { - assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) - } else { - assert.Nil(t, err) - } - }) - } -} - -func TestGetBlockCountNoRefresh(t *testing.T) { - pastelClient := &fakePastelClient{ - retBlockCnt: 10, - retErr: errors.New("error"), - } - - expectedBlk := int32(1) - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = expectedBlk - tracker.lastRetried = time.Now().UTC() - tracker.lastSuccess = time.Now().UTC() - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} - -func TestGetBlockCountRefresh(t *testing.T) { - expectedBlk := int32(10) - pastelClient := &fakePastelClient{ - retBlockCnt: expectedBlk, - retErr: nil, - } - - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = 1 - tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go deleted file mode 100644 index 227ebe35..00000000 --- a/pkg/common/task/action.go +++ /dev/null @@ -1,20 +0,0 @@ -package task - -import "context" - -// ActionFn represents a function that is run inside a goroutine. 
-type ActionFn func(ctx context.Context) error - -// Action represents the action of the task. -type Action struct { - fn ActionFn - doneCh chan struct{} -} - -// NewAction returns a new Action instance. -func NewAction(fn ActionFn) *Action { - return &Action{ - fn: fn, - doneCh: make(chan struct{}), - } -} diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go deleted file mode 100644 index 05179a85..00000000 --- a/pkg/common/task/state/state.go +++ /dev/null @@ -1,174 +0,0 @@ -//go:generate mockery --name=State - -package state - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" - "github.com/LumeraProtocol/supernode/v2/pkg/types" -) - -// State represents a state of the task. -type State interface { - // Status returns the current status. - Status() *Status - - // SetStatusNotifyFunc sets a function to be called after the state is updated. - SetStatusNotifyFunc(fn func(status *Status)) - - // RequiredStatus returns an error if the current status doen't match the given one. - RequiredStatus(subStatus SubStatus) error - - // StatusHistory returns all history from the very beginning. - StatusHistory() []*Status - - // UpdateStatus updates the status of the state by creating a new status with the given `status`. - UpdateStatus(subStatus SubStatus) - - // SubscribeStatus returns a new subscription of the state. 
- SubscribeStatus() func() <-chan *Status - - //SetStateLog set the wallet node task status log to the state status log - SetStateLog(statusLog types.Fields) - - //InitialiseHistoryDB sets the connection to historyDB - InitialiseHistoryDB(store queries.LocalStoreInterface) -} - -type state struct { - status *Status - history []*Status - - notifyFn func(status *Status) - sync.RWMutex - subsCh []chan *Status - taskID string - statusLog types.Fields - historyDBStore queries.LocalStoreInterface -} - -// Status implements State.Status() -func (state *state) Status() *Status { - return state.status -} - -// SetStatusNotifyFunc implements State.SetStatusNotifyFunc() -func (state *state) SetStatusNotifyFunc(fn func(status *Status)) { - state.notifyFn = fn -} - -// RequiredStatus implements State.RequiredStatus() -func (state *state) RequiredStatus(subStatus SubStatus) error { - if state.status.Is(subStatus) { - return nil - } - return errors.Errorf("required status %q, current %q", subStatus, state.status) -} - -// StatusHistory implements State.StatusHistory() -func (state *state) StatusHistory() []*Status { - state.RLock() - defer state.RUnlock() - - return append(state.history, state.status) -} - -// UpdateStatus implements State.UpdateStatus() -func (state *state) UpdateStatus(subStatus SubStatus) { - state.Lock() - defer state.Unlock() - - status := NewStatus(subStatus) - state.history = append(state.history, state.status) - state.status = status - - history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()} - if state.statusLog.IsValid() { - history.Details = types.NewDetails(status.String(), state.statusLog) - } - - if state.historyDBStore != nil { - if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } else { - store, err := queries.OpenHistoryDB() - if err != nil { - 
logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - if _, err := store.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - } - - if state.notifyFn != nil { - state.notifyFn(status) - } - - for _, subCh := range state.subsCh { - subCh := subCh - go func() { - subCh <- status - }() - } -} - -// SubscribeStatus implements State.SubscribeStatus() -func (state *state) SubscribeStatus() func() <-chan *Status { - state.RLock() - defer state.RUnlock() - - subCh := make(chan *Status) - state.subsCh = append(state.subsCh, subCh) - - for _, status := range append(state.history, state.status) { - status := status - go func() { - subCh <- status - }() - } - - sub := func() <-chan *Status { - return subCh - } - return sub -} - -func (state *state) SetStateLog(statusLog types.Fields) { - state.statusLog = statusLog -} - -func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) { - state.historyDBStore = storeInterface -} - -// New returns a new state instance. 
-func New(subStatus SubStatus, taskID string) State { - store, err := queries.OpenHistoryDB() - if err != nil { - logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - - if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID, - Status: subStatus.String()}); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - - return &state{ - status: NewStatus(subStatus), - taskID: taskID, - } -} diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go deleted file mode 100644 index b1b00da6..00000000 --- a/pkg/common/task/state/status.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:generate mockery --name=SubStatus - -package state - -import ( - "fmt" - "time" -) - -// SubStatus represents a sub-status that contains a description of the status. -type SubStatus interface { - fmt.Stringer - IsFinal() bool - IsFailure() bool -} - -// Status represents a state of the task. -type Status struct { - CreatedAt time.Time - SubStatus -} - -// Is returns true if the current `Status` matches to the given `statuses`. -func (status *Status) Is(subStatus SubStatus) bool { - return status.SubStatus == subStatus -} - -// NewStatus returns a new Status instance. 
-func NewStatus(subStatus SubStatus) *Status { - return &Status{ - CreatedAt: time.Now().UTC(), - SubStatus: subStatus, - } -} diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go deleted file mode 100644 index adf173e4..00000000 --- a/pkg/common/task/task.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:generate mockery --name=Task - -package task - -import ( - "context" - "sync" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" -) - -// Task represent a worker task. -type Task interface { - state.State - - // ID returns id of the task. - ID() string - - // Run starts the task. - Run(ctx context.Context) error - - // Cancel tells a task to abandon its work. - // Cancel may be called by multiple goroutines simultaneously. - // After the first call, subsequent calls to a Cancel do nothing. - Cancel() - - // Done returns a channel when the task is canceled. - Done() <-chan struct{} - - // RunAction waits for new actions, starts handling each of them in a new goroutine. - RunAction(ctx context.Context) error - - // NewAction creates a new action and passes for the execution. - // It is used when it is necessary to run an action in the context of `Tasks` rather than the one who was called. 
- NewAction(fn ActionFn) <-chan struct{} - - // CloseActionCh closes action ch - CloseActionCh() -} - -type task struct { - state.State - - id string - - actionCh chan *Action - - doneMu sync.Mutex - doneCh chan struct{} - closeOnce sync.Once -} - -// ID implements Task.ID -func (task *task) ID() string { - return task.id -} - -// Run implements Task.Run -func (task *task) Run(_ context.Context) error { - return errors.New("task default run func not implemented") -} - -// Cancel implements Task.Cancel -func (task *task) Cancel() { - task.doneMu.Lock() - defer task.doneMu.Unlock() - - select { - case <-task.Done(): - logtrace.Debug(context.Background(), "task cancelled", logtrace.Fields{"task_id": task.ID()}) - return - default: - close(task.doneCh) - } -} - -// Done implements Task.Done -func (task *task) Done() <-chan struct{} { - return task.doneCh -} - -// RunAction implements Task.RunAction -func (task *task) RunAction(ctx context.Context) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - group, ctx := errgroup.WithContext(ctx) - for { - select { - case <-ctx.Done(): - logtrace.Debug(ctx, "context done", logtrace.Fields{"task_id": task.ID()}) - case <-task.Done(): - logtrace.Debug(ctx, "task done", logtrace.Fields{"task_id": task.ID()}) - cancel() - case action, ok := <-task.actionCh: - if !ok { - logtrace.Debug(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()}) - return group.Wait() - } - - currAction := action - group.Go(func() error { - defer close(currAction.doneCh) - - return currAction.fn(ctx) - }) - continue - } - break - } - - return group.Wait() -} - -// CloseActionCh safely closes the action channel -func (task *task) CloseActionCh() { - task.closeOnce.Do(func() { - close(task.actionCh) - }) -} - -// NewAction implements Task.NewAction -func (task *task) NewAction(fn ActionFn) <-chan struct{} { - act := NewAction(fn) - task.actionCh <- act - return act.doneCh -} - -// New returns a new task instance. 
-func New(status state.SubStatus) Task { - taskID, _ := random.String(8, random.Base62Chars) - - return &task{ - State: state.New(status, taskID), - id: taskID, - doneCh: make(chan struct{}), - actionCh: make(chan *Action), - } -} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go deleted file mode 100644 index 561b8f0b..00000000 --- a/pkg/common/task/ticket.go +++ /dev/null @@ -1,13 +0,0 @@ -package task - -type CascadeTicket struct { - Creator string `json:"creator"` - CreatorSignature []byte `json:"creator_signature"` - DataHash string `json:"data_hash"` - ActionID string `json:"action_id"` - BlockHeight int64 `json:"block_height"` - BlockHash []byte `json:"block_hash"` - RQIDsIC uint32 `json:"rqids_ic"` - RQIDsMax int32 `json:"rqids_max"` - RQIDs []string `json:"rq_ids"` -} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go deleted file mode 100644 index 14043079..00000000 --- a/pkg/common/task/worker.go +++ /dev/null @@ -1,144 +0,0 @@ -package task - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// Worker represents a pool of the task. -type Worker struct { - sync.Mutex - - tasks []Task - taskCh chan Task -} - -// Tasks returns all tasks. -func (worker *Worker) Tasks() []Task { - worker.Lock() - defer worker.Unlock() - - // return a shallow copy to avoid data races - copied := make([]Task, len(worker.tasks)) - copy(copied, worker.tasks) - return copied -} - -// Task returns the task by the given id. -func (worker *Worker) Task(taskID string) Task { - worker.Lock() - defer worker.Unlock() - - for _, task := range worker.tasks { - if task.ID() == taskID { - return task - } - } - return nil -} - -// AddTask adds the new task. 
-func (worker *Worker) AddTask(task Task) { - worker.Lock() - defer worker.Unlock() - - worker.tasks = append(worker.tasks, task) - worker.taskCh <- task - - // Proactively remove the task once it's done to prevent lingering entries - go func(t Task) { - <-t.Done() - // remove promptly when the task signals completion/cancelation - worker.RemoveTask(t) - }(task) -} - -// RemoveTask removes the task. -func (worker *Worker) RemoveTask(subTask Task) { - worker.Lock() - defer worker.Unlock() - - for i, task := range worker.tasks { - if task == subTask { - worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...) - return - } - } -} - -// Run waits for new tasks, starts handling each of them in a new goroutine. -func (worker *Worker) Run(ctx context.Context) error { - group, _ := errgroup.WithContext(ctx) // Create an error group but ignore the derived context - // Background sweeper to prune finalized tasks that might linger - // even if the task's Run wasn't executed to completion. - sweeperCtx, sweeperCancel := context.WithCancel(ctx) - defer sweeperCancel() - go worker.cleanupLoop(sweeperCtx) - for { - select { - case <-ctx.Done(): - logtrace.Warn(ctx, "Worker run stopping", logtrace.Fields{logtrace.FieldError: ctx.Err().Error()}) - return group.Wait() - case t := <-worker.taskCh: // Rename here - currentTask := t // Capture the loop variable - group.Go(func() error { - defer func() { - if r := recover(); r != nil { - logtrace.Error(ctx, "Recovered from panic in common task's worker run", logtrace.Fields{"task": currentTask.ID(), "error": r}) - } - - logtrace.Debug(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()}) - // Remove the task from the worker's task list - worker.RemoveTask(currentTask) - }() - - return currentTask.Run(ctx) // Use the captured variable - }) - } - } -} - -// NewWorker returns a new Worker instance. 
-func NewWorker() *Worker { - w := &Worker{taskCh: make(chan Task)} - return w -} - -// cleanupLoop periodically removes tasks that are in a final state for a grace period -func (worker *Worker) cleanupLoop(ctx context.Context) { - const ( - cleanupInterval = 30 * time.Second - finalTaskTTL = 2 * time.Minute - ) - - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - now := time.Now() - worker.Lock() - // iterate and compact in-place - kept := worker.tasks[:0] - for _, t := range worker.tasks { - st := t.Status() - if st != nil && st.SubStatus != nil && st.SubStatus.IsFinal() { - if now.Sub(st.CreatedAt) >= finalTaskTTL { - // drop this finalized task - continue - } - } - kept = append(kept, t) - } - worker.tasks = kept - worker.Unlock() - } - } -} diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go deleted file mode 100644 index 4c5f21ac..00000000 --- a/pkg/common/task/worker_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package task - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWorkerTasks(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - tests := []struct { - name string - fields fields - want []Task - }{ - { - name: "retrieve tasks", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - want: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Tasks()) - }) - } -} - -func TestWorkerTask(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - type args struct { - taskID string - } - tests := []struct { - name string - fields fields - args args - want Task - }{ - { - name: "get task with id 1", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"2"}, - want: 
&task{id: "2"}, - }, - { - name: "get not exist task", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"3"}, - want: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Task(tt.args.taskID)) - }) - } -} - -func TestWorkerAddTask(t *testing.T) { - t.Parallel() - - type args struct { - task Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "add task", - args: args{&task{id: "1"}}, - want: []Task{&task{id: "1"}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - taskCh: make(chan Task), - } - - go func() { - worker.AddTask(tt.args.task) - }() - - <-worker.taskCh - tasks := worker.tasks - assert.Equal(t, tt.want, tasks) - - }) - } -} - -func TestWorkerRemoveTask(t *testing.T) { - t.Parallel() - - type args struct { - subTask Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "removed task", - args: args{&task{id: "1"}}, - want: []Task{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: []Task{tt.args.subTask}, - } - - worker.RemoveTask(tt.args.subTask) - assert.Equal(t, tt.want, worker.tasks) - }) - } -} diff --git a/pkg/dd/client.go b/pkg/dd/client.go deleted file mode 100644 index f7b10c80..00000000 --- a/pkg/dd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package dd - -import ( - "context" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding/gzip" -) - -const ( - defaultConnectTimeout = 60 * time.Second -) - -type client struct{} - -// Connect implements node.Client.Connect() -func (cl *client) Connect(ctx context.Context, address string) (Connection, error) 
{ - // Limits the dial timeout, prevent got stuck too long - dialCtx, cancel := context.WithTimeout(ctx, defaultConnectTimeout) - defer cancel() - - id, _ := random.String(8, random.Base62Chars) - - grpcConn, err := grpc.DialContext(dialCtx, address, - //lint:ignore SA1019 we want to ignore this for now - grpc.WithInsecure(), - grpc.WithBlock(), - grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name), grpc.MaxCallRecvMsgSize(35000000)), - ) - if err != nil { - return nil, errors.Errorf("fail to dial: %w", err).WithField("address", address) - } - - logtrace.Debug(ctx, "Connected to address with max recv size 35 MB", logtrace.Fields{logtrace.FieldModule: "dd", "address": address}) - - conn := newClientConn(id, grpcConn) - go func() { - //<-conn.Done() // FIXME: to be implemented by new gRPC package - logtrace.Debug(ctx, "Disconnected", logtrace.Fields{logtrace.FieldModule: "dd", "target": grpcConn.Target()}) - }() - return conn, nil -} diff --git a/pkg/dd/config.go b/pkg/dd/config.go deleted file mode 100644 index c0ca0607..00000000 --- a/pkg/dd/config.go +++ /dev/null @@ -1,50 +0,0 @@ -package dd - -import ( - "fmt" - "path/filepath" -) - -const ( - errValidationStr = "ddserver client validation failed - missing val" -) - -// Config contains settings of the dd-server -type Config struct { - // Host the queries IPv4 or IPv6 address - Host string `mapstructure:"host" json:"host,omitempty"` - - // Port the queries port to listen for connections on - Port int `mapstructure:"port" json:"port,omitempty"` - - // DDFilesDir - the location of temporary folder to transfer image data to ddserver - DDFilesDir string `mapstructure:"dd-temp-file-dir" json:"dd-temp-file-dir,omitempty"` -} - -// NewConfig returns a new Config instance. 
-func NewConfig() *Config { - return &Config{} -} - -// SetWorkDir update working dir -func (config *Config) SetWorkDir(workDir string) { - if !filepath.IsAbs(config.DDFilesDir) { - config.DDFilesDir = filepath.Join(workDir, config.DDFilesDir) - } -} - -// Validate raptorq configs -func (config *Config) Validate() error { - if config.Host == "" { - return fmt.Errorf("%s: %s", errValidationStr, "host") - } - if config.Port == 0 { - return fmt.Errorf("%s: %s", errValidationStr, "port") - } - - if config.DDFilesDir == "" { - return fmt.Errorf("%s: %s", errValidationStr, "dd-temp-file-dir") - } - - return nil -} diff --git a/pkg/dd/connection.go b/pkg/dd/connection.go deleted file mode 100644 index 34f3b20e..00000000 --- a/pkg/dd/connection.go +++ /dev/null @@ -1,23 +0,0 @@ -package dd - -import ( - "google.golang.org/grpc" -) - -// clientConn represents grpc client conneciton. -type clientConn struct { - *grpc.ClientConn - - id string -} - -func (conn *clientConn) DDService(config *Config) DDService { - return newDDServerClient(conn, config) -} - -func newClientConn(id string, conn *grpc.ClientConn) *clientConn { - return &clientConn{ - ClientConn: conn, - id: id, - } -} diff --git a/pkg/dd/dd_mock.go b/pkg/dd/dd_mock.go deleted file mode 100644 index 224831c6..00000000 --- a/pkg/dd/dd_mock.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go -// -// Generated by this command: -// -// mockgen -destination=dd_mock.go -package=dd -source=interfaces.go -// - -// Package dd is a generated GoMock package. -package dd - -import ( - context "context" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" -) - -// MockClientInterface is a mock of ClientInterface interface. -type MockClientInterface struct { - ctrl *gomock.Controller - recorder *MockClientInterfaceMockRecorder - isgomock struct{} -} - -// MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface. 
-type MockClientInterfaceMockRecorder struct { - mock *MockClientInterface -} - -// NewMockClientInterface creates a new mock instance. -func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface { - mock := &MockClientInterface{ctrl: ctrl} - mock.recorder = &MockClientInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder { - return m.recorder -} - -// Connect mocks base method. -func (m *MockClientInterface) Connect(ctx context.Context, address string) (Connection, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", ctx, address) - ret0, _ := ret[0].(Connection) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Connect indicates an expected call of Connect. -func (mr *MockClientInterfaceMockRecorder) Connect(ctx, address any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockClientInterface)(nil).Connect), ctx, address) -} - -// MockConnection is a mock of Connection interface. -type MockConnection struct { - ctrl *gomock.Controller - recorder *MockConnectionMockRecorder - isgomock struct{} -} - -// MockConnectionMockRecorder is the mock recorder for MockConnection. -type MockConnectionMockRecorder struct { - mock *MockConnection -} - -// NewMockConnection creates a new mock instance. -func NewMockConnection(ctrl *gomock.Controller) *MockConnection { - mock := &MockConnection{ctrl: ctrl} - mock.recorder = &MockConnectionMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockConnection) EXPECT() *MockConnectionMockRecorder { - return m.recorder -} - -// Close mocks base method. 
-func (m *MockConnection) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockConnectionMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConnection)(nil).Close)) -} - -// DDService mocks base method. -func (m *MockConnection) DDService(config *Config) DDService { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DDService", config) - ret0, _ := ret[0].(DDService) - return ret0 -} - -// DDService indicates an expected call of DDService. -func (mr *MockConnectionMockRecorder) DDService(config any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DDService", reflect.TypeOf((*MockConnection)(nil).DDService), config) -} - -// MockDDService is a mock of DDService interface. -type MockDDService struct { - ctrl *gomock.Controller - recorder *MockDDServiceMockRecorder - isgomock struct{} -} - -// MockDDServiceMockRecorder is the mock recorder for MockDDService. -type MockDDServiceMockRecorder struct { - mock *MockDDService -} - -// NewMockDDService creates a new mock instance. -func NewMockDDService(ctrl *gomock.Controller) *MockDDService { - mock := &MockDDService{ctrl: ctrl} - mock.recorder = &MockDDServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDDService) EXPECT() *MockDDServiceMockRecorder { - return m.recorder -} - -// GetStatus mocks base method. -func (m *MockDDService) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatus", ctx, req) - ret0, _ := ret[0].(GetStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatus indicates an expected call of GetStatus. 
-func (mr *MockDDServiceMockRecorder) GetStatus(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockDDService)(nil).GetStatus), ctx, req) -} - -// ImageRarenessScore mocks base method. -func (m *MockDDService) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageRarenessScore", ctx, req) - ret0, _ := ret[0].(ImageRarenessScoreResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageRarenessScore indicates an expected call of ImageRarenessScore. -func (mr *MockDDServiceMockRecorder) ImageRarenessScore(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRarenessScore", reflect.TypeOf((*MockDDService)(nil).ImageRarenessScore), ctx, req) -} diff --git a/pkg/dd/dd_server_client.go b/pkg/dd/dd_server_client.go deleted file mode 100644 index 5f927805..00000000 --- a/pkg/dd/dd_server_client.go +++ /dev/null @@ -1,24 +0,0 @@ -package dd - -import ( - dd "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" -) - -type ddServerClientImpl struct { - config *Config - conn *clientConn - ddService dd.DupeDetectionServerClient -} - -// NewDDServerClient returns a new dd-server-client instance. 
-func newDDServerClient(conn *clientConn, c *Config) DDService { - return &ddServerClientImpl{ - config: c, - conn: conn, - ddService: dd.NewDupeDetectionServerClient(conn), - } -} - -func (c *ddServerClientImpl) Close() { - c.conn.Close() -} diff --git a/pkg/dd/image_rareness.go b/pkg/dd/image_rareness.go deleted file mode 100644 index 74fec800..00000000 --- a/pkg/dd/image_rareness.go +++ /dev/null @@ -1,108 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type RarenessScoreRequest struct { - Filepath string -} - -type ImageRarenessScoreResponse struct { - PastelBlockHashWhenRequestSubmitted string - PastelBlockHeightWhenRequestSubmitted string - UtcTimestampWhenRequestSubmitted string - PastelIdOfSubmitter string - PastelIdOfRegisteringSupernode_1 string - PastelIdOfRegisteringSupernode_2 string - PastelIdOfRegisteringSupernode_3 string - IsPastelOpenapiRequest bool - ImageFilePath string - DupeDetectionSystemVersion string - IsLikelyDupe bool - IsRareOnInternet bool - OverallRarenessScore float32 - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 - RarenessScoresTableJsonCompressedB64 string - InternetRareness *ddService.InternetRareness - OpenNsfwScore float32 - AlternativeNsfwScores *ddService.AltNsfwScores - ImageFingerprintOfCandidateImageFile []float64 - CollectionNameString string - HashOfCandidateImageFile string - OpenApiGroupIdString string - GroupRarenessScore float32 - CandidateImageThumbnailWebpAsBase64String string - DoesNotImpactTheFollowingCollectionStrings string - IsInvalidSenseRequest bool - InvalidSenseRequestReason string - SimilarityScoreToFirstEntryInCollection float32 - CpProbability float32 - ChildProbability float32 - 
ImageFingerprintSetChecksum string -} - -// ImageRarenessScore gets the image rareness score -func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - ctx = net.AddCorrelationID(ctx) - fields := logtrace.Fields{ - logtrace.FieldMethod: "ImageRarenessScore", - logtrace.FieldRequest: req, - } - logtrace.Debug(ctx, "getting image rareness score", fields) - - res, err := c.ddService.ImageRarenessScore(ctx, &ddService.RarenessScoreRequest{ImageFilepath: req.Filepath}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get image rareness score", fields) - return ImageRarenessScoreResponse{}, fmt.Errorf("dd image rareness score error: %w", err) - } - - logtrace.Debug(ctx, "successfully got image rareness score", fields) - return toImageRarenessScoreResponse(res), nil -} - -func toImageRarenessScoreResponse(reply *ddService.ImageRarenessScoreReply) ImageRarenessScoreResponse { - return ImageRarenessScoreResponse{ - PastelBlockHashWhenRequestSubmitted: reply.PastelBlockHashWhenRequestSubmitted, - PastelBlockHeightWhenRequestSubmitted: reply.PastelBlockHeightWhenRequestSubmitted, - UtcTimestampWhenRequestSubmitted: reply.UtcTimestampWhenRequestSubmitted, - PastelIdOfSubmitter: reply.PastelIdOfSubmitter, - PastelIdOfRegisteringSupernode_1: reply.PastelIdOfRegisteringSupernode_1, - PastelIdOfRegisteringSupernode_2: reply.PastelIdOfRegisteringSupernode_2, - PastelIdOfRegisteringSupernode_3: reply.PastelIdOfRegisteringSupernode_3, - IsPastelOpenapiRequest: reply.IsPastelOpenapiRequest, - ImageFilePath: reply.ImageFilePath, - DupeDetectionSystemVersion: reply.DupeDetectionSystemVersion, - IsLikelyDupe: reply.IsLikelyDupe, - IsRareOnInternet: reply.IsRareOnInternet, - OverallRarenessScore: reply.OverallRarenessScore, - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct, - 
PctOfTop_10MostSimilarWithDupeProbAbove_33Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct, - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct, - RarenessScoresTableJsonCompressedB64: reply.RarenessScoresTableJsonCompressedB64, - InternetRareness: reply.InternetRareness, - OpenNsfwScore: reply.OpenNsfwScore, - AlternativeNsfwScores: reply.AlternativeNsfwScores, - ImageFingerprintOfCandidateImageFile: reply.ImageFingerprintOfCandidateImageFile, - CollectionNameString: reply.CollectionNameString, - HashOfCandidateImageFile: reply.HashOfCandidateImageFile, - OpenApiGroupIdString: reply.OpenApiGroupIdString, - GroupRarenessScore: reply.GroupRarenessScore, - CandidateImageThumbnailWebpAsBase64String: reply.CandidateImageThumbnailWebpAsBase64String, - DoesNotImpactTheFollowingCollectionStrings: reply.DoesNotImpactTheFollowingCollectionStrings, - IsInvalidSenseRequest: reply.IsInvalidSenseRequest, - InvalidSenseRequestReason: reply.InvalidSenseRequestReason, - SimilarityScoreToFirstEntryInCollection: reply.SimilarityScoreToFirstEntryInCollection, - CpProbability: reply.CpProbability, - ChildProbability: reply.ChildProbability, - ImageFingerprintSetChecksum: reply.ImageFingerprintSetChecksum, - } -} diff --git a/pkg/dd/interfaces.go b/pkg/dd/interfaces.go deleted file mode 100644 index 45b196d3..00000000 --- a/pkg/dd/interfaces.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:generate mockgen -destination=dd_mock.go -package=dd -source=interfaces.go - -package dd - -import "context" - -// ClientInterface represents a base connection interface. -type ClientInterface interface { - // Connect connects to the server at the given address. - Connect(ctx context.Context, address string) (Connection, error) -} - -// Connection represents a client connection -type Connection interface { - // Close closes connection. - Close() error - - // DDService returns a new dd-service stream. 
- DDService(config *Config) DDService - - // FIXME: - // Done returns a channel that's closed when connection is shutdown. - //Done() <-chan struct{} -} - -// DDService contains methods for request services from dd-service. -type DDService interface { - ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) - GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) -} diff --git a/pkg/dd/status.go b/pkg/dd/status.go deleted file mode 100644 index 812b62d6..00000000 --- a/pkg/dd/status.go +++ /dev/null @@ -1,44 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type GetStatusRequest struct { -} - -type GetStatusResponse struct { - Version string - TaskCount *ddService.TaskCount - TaskMetrics *ddService.TaskMetrics -} - -// GetStatus retrieves the status. 
-func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - ctx = net.AddCorrelationID(ctx) - - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldRequest: req, - } - logtrace.Debug(ctx, "getting status", fields) - - res, err := c.ddService.GetStatus(ctx, &ddService.GetStatusRequest{}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get status", fields) - return GetStatusResponse{}, fmt.Errorf("dd get status error: %w", err) - } - - logtrace.Debug(ctx, "successfully got status", fields) - return GetStatusResponse{ - Version: res.GetVersion(), - TaskCount: res.GetTaskCount(), - TaskMetrics: res.GetTaskMetrics(), - }, nil -} diff --git a/sn-manager/internal/github/client.go b/pkg/github/client.go similarity index 96% rename from sn-manager/internal/github/client.go rename to pkg/github/client.go index 70e99d6a..721a02e1 100644 --- a/sn-manager/internal/github/client.go +++ b/pkg/github/client.go @@ -6,6 +6,7 @@ import ( "io" "log" "net/http" + "strings" "time" ) @@ -127,7 +128,7 @@ func (c *Client) ListReleases() ([]*Release, error) { func (c *Client) GetLatestStableRelease() (*Release, error) { // Try the latest release endpoint first (single API call) release, err := c.GetLatestRelease() - if err == nil && !release.Draft && !release.Prerelease { + if err == nil && !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } @@ -139,7 +140,7 @@ func (c *Client) GetLatestStableRelease() (*Release, error) { // Filter for stable releases (not draft, not prerelease) for _, release := range releases { - if !release.Draft && !release.Prerelease { + if !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } } diff --git a/pkg/lumera/modules/auth/impl.go b/pkg/lumera/modules/auth/impl.go index a3ad3bca..4304e2dd 100644 --- 
a/pkg/lumera/modules/auth/impl.go +++ b/pkg/lumera/modules/auth/impl.go @@ -45,8 +45,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if err != nil { return fmt.Errorf("invalid address: %w", err) } - - logtrace.Debug(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()}) + logtrace.Info(ctx, "auth: verify signature start", logtrace.Fields{"address": addr.String()}) // Use Account RPC instead of AccountInfo to get the full account with public key accResp, err := m.client.Account(ctx, &authtypes.QueryAccountRequest{ @@ -66,10 +65,10 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if pubKey == nil { return fmt.Errorf("public key is nil") } - logtrace.Debug(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()}) + logtrace.Info(ctx, "auth: public key loaded", logtrace.Fields{"address": addr.String()}) if !pubKey.VerifySignature(data, signature) { return fmt.Errorf("invalid signature") } - + logtrace.Info(ctx, "auth: verify signature ok", logtrace.Fields{"address": addr.String()}) return nil } diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go index d0b633a8..064e30c0 100644 --- a/pkg/lumera/modules/supernode/impl.go +++ b/pkg/lumera/modules/supernode/impl.go @@ -30,6 +30,7 @@ func newModule(conn *grpc.ClientConn) (Module, error) { func (m *module) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) { resp, err := m.client.GetTopSuperNodesForBlock(ctx, &types.QueryGetTopSuperNodesForBlockRequest{ BlockHeight: int32(blockHeight), + State: types.SuperNodeStateActive.String(), }) if err != nil { return nil, fmt.Errorf("failed to get top supernodes: %w", err) diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go deleted file mode 100644 index ea36b1ab..00000000 --- a/pkg/raptorq/helper.go +++ /dev/null @@ -1,49 +0,0 @@ -package raptorq - -import ( - "bytes" - 
"context" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - "strconv" -) - -const ( - InputEncodeFileName = "input.data" - SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' -) - -// GetIDFiles generates ID Files for dd_and_fingerprints files and rq_id files -// file is b64 encoded file appended with signatures and compressed, ic is the initial counter -// and max is the number of ids to generate -func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { - idFiles := make([][]byte, 0, max) - ids = make([]string, 0, max) - var buffer bytes.Buffer - - for i := uint32(0); i < max; i++ { - buffer.Reset() - counter := ic + i - - buffer.Write(file) - buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility - - compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } - - idFiles = append(idFiles, compressedData) - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) - } - - ids = append(ids, base58.Encode(hash)) - } - - return ids, idFiles, nil -} diff --git a/pkg/task/handle.go b/pkg/task/handle.go new file mode 100644 index 00000000..74f6e406 --- /dev/null +++ b/pkg/task/handle.go @@ -0,0 +1,66 @@ +package task + +import ( + "context" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// Handle manages a running task with an optional watchdog. +// It ensures Start and End are paired, logs start/end, and auto-ends on timeout. 
+type Handle struct { + tr Tracker + service string + id string + stop chan struct{} + once sync.Once +} + +// Start starts tracking a task and returns a Handle that will ensure the +// task is ended. A watchdog is started to auto-end the task after timeout +// to avoid indefinitely stuck running tasks in status reporting. +func StartWith(tr Tracker, ctx context.Context, service, id string, timeout time.Duration) *Handle { + if tr == nil || service == "" || id == "" { + return &Handle{} + } + tr.Start(service, id) + logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) + + g := &Handle{tr: tr, service: service, id: id, stop: make(chan struct{})} + if timeout > 0 { + go func() { + select { + case <-time.After(timeout): + g.endWith(ctx, true) + case <-g.stop: + } + }() + } + return g +} + +// End stops tracking the task. Safe to call multiple times. +func (g *Handle) End(ctx context.Context) { + g.endWith(ctx, false) +} + +// EndWith ends the guard and logs accordingly. If expired is true, +// it emits a warning and ends the task to avoid stuck status. +func (g *Handle) endWith(ctx context.Context, expired bool) { + if g == nil || g.service == "" || g.id == "" { + return + } + g.once.Do(func() { + close(g.stop) + if g.tr != nil { + g.tr.End(g.service, g.id) + } + if expired { + logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) + } else { + logtrace.Info(ctx, "task: ended", logtrace.Fields{"service": g.service, "task_id": g.id}) + } + }) +} diff --git a/pkg/task/task.go b/pkg/task/task.go new file mode 100644 index 00000000..8d0c0052 --- /dev/null +++ b/pkg/task/task.go @@ -0,0 +1,79 @@ +// Package task provides a lean, concurrency-safe, in-memory tracker for +// live tasks running inside a service. It is designed to be generic and +// reusable across multiple features (e.g., cascade upload/download) and +// only tracks tasks while the enclosing RPC/handler is alive. 
No +// persistence, progress reporting, or background processing is included. +package task + +import "sync" + +// Tracker defines a minimal interface for tracking live tasks per service. +// Implementations must be concurrency-safe. All methods are non-blocking +// and best-effort; invalid inputs are ignored. +type Tracker interface { + Start(service, taskID string) + End(service, taskID string) + Snapshot() map[string][]string +} + +// InMemoryTracker is a lean, concurrency-safe tracker of live tasks. +// It stores only in-memory state for the lifetime of the process and +// returns copies when asked for a snapshot to ensure isolation. +type InMemoryTracker struct { + mu sync.RWMutex + // service -> set(taskID) + data map[string]map[string]struct{} +} + +// New creates and returns a new in-memory tracker. +func New() *InMemoryTracker { + return &InMemoryTracker{data: make(map[string]map[string]struct{})} +} + +// Start marks a task as running under a given service. Empty arguments +// are ignored. Calling Start with the same (service, taskID) pair is idempotent. +func (t *InMemoryTracker) Start(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + m, ok := t.data[service] + if !ok { + m = make(map[string]struct{}) + t.data[service] = m + } + m[taskID] = struct{}{} + t.mu.Unlock() +} + +// End removes a running task under a given service. Empty arguments +// are ignored. Removing a non-existent (service, taskID) pair is a no-op. +func (t *InMemoryTracker) End(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + if m, ok := t.data[service]; ok { + delete(m, taskID) + if len(m) == 0 { + delete(t.data, service) + } + } + t.mu.Unlock() +} + +// Snapshot returns a copy of the current running tasks per service. +// The returned map and slices are independent of internal state. 
+func (t *InMemoryTracker) Snapshot() map[string][]string { + out := make(map[string][]string) + t.mu.RLock() + for svc, m := range t.data { + ids := make([]string, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + out[svc] = ids + } + t.mu.RUnlock() + return out +} diff --git a/pkg/task/task_test.go b/pkg/task/task_test.go new file mode 100644 index 00000000..1550bc37 --- /dev/null +++ b/pkg/task/task_test.go @@ -0,0 +1,157 @@ +package task + +import ( + "context" + "sync" + "testing" + "time" +) + +func TestStartEndSnapshot(t *testing.T) { + tr := New() + + // Initially empty + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot, got %#v", snap) + } + + // Start two tasks under same service + tr.Start("svc", "id1") + tr.Start("svc", "id2") + + snap := tr.Snapshot() + ids, ok := snap["svc"] + if !ok { + t.Fatalf("expected service 'svc' in snapshot") + } + if len(ids) != 2 { + t.Fatalf("expected 2 ids, got %d (%v)", len(ids), ids) + } + + // End one task + tr.End("svc", "id1") + snap = tr.Snapshot() + ids = snap["svc"] + if len(ids) != 1 { + t.Fatalf("expected 1 id, got %d (%v)", len(ids), ids) + } + if ids[0] != "id2" && ids[0] != "id1" { // order not guaranteed; check that id2 remains by set membership + // Build a small set for clarity + m := map[string]struct{}{} + for _, v := range ids { + m[v] = struct{}{} + } + if _, ok := m["id2"]; !ok { + t.Fatalf("expected id2 to remain, got %v", ids) + } + } + + // End last task + tr.End("svc", "id2") + snap = tr.Snapshot() + if _, ok := snap["svc"]; ok { + t.Fatalf("expected service removed after last task ended, got %v", snap) + } +} + +func TestInvalidInputsAndIsolation(t *testing.T) { + tr := New() + + // Invalid inputs should be ignored + tr.Start("", "id") + tr.Start("svc", "") + tr.End("", "id") + tr.End("svc", "") + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot for invalid inputs, got %#v", snap) + } + + // Snapshot must be a copy + 
tr.Start("svc", "id") + snap := tr.Snapshot() + // mutate snapshot map and slice + delete(snap, "svc") + snap2 := tr.Snapshot() + if _, ok := snap2["svc"]; !ok { + t.Fatalf("mutating snapshot should not affect tracker state") + } +} + +// TestConcurrentAccessNoPanic ensures that concurrent Start/End/Snapshot +// operations do not panic due to unsafe map access. +func TestConcurrentAccessNoPanic(t *testing.T) { + tr := New() + + // Run a mix of writers and readers concurrently. + var wg sync.WaitGroup + startWriters := 8 + snapReaders := 4 + loops := 1000 + + // Writers: repeatedly start/end tasks across a few services. + for w := 0; w < startWriters; w++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for i := 0; i < loops; i++ { + svc := "svc" + string('A'+rune(id%3)) // svcA, svcB, svcC + tid := svc + ":t" + fmtInt(i%5) + tr.Start(svc, tid) + if i%2 == 0 { + tr.End(svc, tid) + } + } + }(w) + } + + // Readers: take snapshots concurrently. + for r := 0; r < snapReaders; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < loops; i++ { + _ = tr.Snapshot() + } + }() + } + + // If there is any concurrent map access bug, the test runner would panic. + done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + select { + case <-done: + // ok + case <-time.After(5 * time.Second): + t.Fatal("concurrent access test timed out") + } +} + +// fmtInt provides a tiny int-to-string helper to avoid importing strconv. 
+func fmtInt(i int) string { return string('0' + rune(i)) } + +func TestHandleIdempotentAndWatchdog(t *testing.T) { + tr := New() + ctx := context.Background() + + // Idempotent End + g := StartWith(tr, ctx, "svc.handle", "id-1", 0) + g.End(ctx) + g.End(ctx) // no panic, no double-end crash + + // Watchdog auto-end: use a small timeout + g2 := StartWith(tr, ctx, "svc.handle", "id-2", 50*time.Millisecond) + _ = g2 // ensure handle stays referenced until timeout path + // Do not call End; let the watchdog fire + time.Sleep(120 * time.Millisecond) + + // After watchdog, the task should not be listed + snap := tr.Snapshot() + if ids, ok := snap["svc.handle"]; ok { + // If still present, ensure id-2 is not in the list + for _, id := range ids { + if id == "id-2" { + t.Fatalf("expected watchdog to remove id-2 from svc.handle; snapshot: %v", ids) + } + } + } +} diff --git a/profile_cascade.sh b/profile_cascade.sh index 7fe0af5e..9b6fe71a 100755 --- a/profile_cascade.sh +++ b/profile_cascade.sh @@ -4,7 +4,7 @@ # Samples heap every 30 seconds during cascade downloads # Configuration - modify these as needed -PROFILE_URL="http://localhost:6062/debug/pprof/heap" +PROFILE_URL="http://localhost:8002/api/v1/debug/raw/pprof/heap" INTERVAL=30 TIMESTAMP=$(date +%Y%m%d_%H%M%S) PROFILE_DIR="profiles_${TIMESTAMP}" diff --git a/proto/dupedetection/dd-server.proto b/proto/dupedetection/dd-server.proto deleted file mode 100644 index 0217aece..00000000 --- a/proto/dupedetection/dd-server.proto +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/dupedetection"; - -package dupedetection; - -service DupeDetectionServer { - rpc ImageRarenessScore(RarenessScoreRequest) returns(ImageRarenessScoreReply); - rpc GetStatus(GetStatusRequest) returns(GetStatusResponse); -} - -message RarenessScoreRequest { - string image_filepath = 1; - string pastel_block_hash_when_request_submitted = 2; - string pastel_block_height_when_request_submitted = 3; - string utc_timestamp_when_request_submitted = 4; - string pastel_id_of_submitter = 5; - string pastel_id_of_registering_supernode_1 = 6; - string pastel_id_of_registering_supernode_2 = 7; - string pastel_id_of_registering_supernode_3 = 8; - bool is_pastel_openapi_request = 9; - string open_api_group_id_string = 10; - string collection_name_string = 11; -} - -message ImageRarenessScoreReply { - string pastel_block_hash_when_request_submitted = 1; - string pastel_block_height_when_request_submitted = 2; - string utc_timestamp_when_request_submitted = 3; - string pastel_id_of_submitter = 4; - string pastel_id_of_registering_supernode_1 = 5; - string pastel_id_of_registering_supernode_2 = 6; - string pastel_id_of_registering_supernode_3 = 7; - bool is_pastel_openapi_request = 8; - string image_file_path = 9; - string dupe_detection_system_version = 10; - bool is_likely_dupe = 11; - bool is_rare_on_internet = 12; - float overall_rareness_score = 13; - float pct_of_top_10_most_similar_with_dupe_prob_above_25pct = 14; - float pct_of_top_10_most_similar_with_dupe_prob_above_33pct = 15; - float pct_of_top_10_most_similar_with_dupe_prob_above_50pct = 16; - string rareness_scores_table_json_compressed_b64 = 17; - InternetRareness internet_rareness = 18; - float open_nsfw_score = 19; - AltNsfwScores alternative_nsfw_scores = 20; - repeated double image_fingerprint_of_candidate_image_file = 21; - string collection_name_string = 22; - string hash_of_candidate_image_file = 23; - string 
open_api_group_id_string = 24; - float group_rareness_score = 25; - string candidate_image_thumbnail_webp_as_base64_string = 26; - string does_not_impact_the_following_collection_strings = 27; - bool is_invalid_sense_request = 28; - string invalid_sense_request_reason = 29; - float similarity_score_to_first_entry_in_collection = 30; - float cp_probability = 31; - float child_probability = 32; - string image_fingerprint_set_checksum = 33; -} - - -message InternetRareness { - string rare_on_internet_summary_table_as_json_compressed_b64 = 1; - string rare_on_internet_graph_json_compressed_b64 = 2; - string alternative_rare_on_internet_dict_as_json_compressed_b64 = 3; - uint32 min_number_of_exact_matches_in_page = 4; - string earliest_available_date_of_internet_results = 5; -} - -message AltNsfwScores { - float drawings = 1; - float hentai = 2; - float neutral = 3; - float porn = 4; - float sexy = 5; -} - -message GetStatusRequest {} - -message TaskCount { - int32 max_concurrent = 1; - int32 executing = 2; - int32 waiting_in_queue = 3; - int32 succeeded = 4; - int32 failed = 5; - int32 cancelled = 6; -} - -message TaskMetrics { - float average_task_wait_time_secs = 1; - float max_task_wait_time_secs = 2; - float average_task_execution_time_secs = 3; - int64 average_task_virtual_memory_usage_bytes = 4; - int64 average_task_rss_memory_usage_bytes = 5; - int64 peak_task_rss_memory_usage_bytes = 6; - int64 peak_task_vms_memory_usage_bytes = 7; -} - -message GetStatusResponse { - string version = 1; - TaskCount task_count = 2; - TaskMetrics task_metrics = 3; -} diff --git a/proto/raptorq/raptorq.proto b/proto/raptorq/raptorq.proto deleted file mode 100644 index 07db9baa..00000000 --- a/proto/raptorq/raptorq.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/raptorq"; - -package raptorq; - -service RaptorQ { - rpc EncodeMetaData(EncodeMetaDataRequest) returns(EncodeMetaDataReply); - rpc Encode(EncodeRequest) returns(EncodeReply); - rpc Decode(DecodeRequest) returns(DecodeReply); -} - -message EncodeMetaDataRequest { - string path = 1; - uint32 files_number = 2; - string block_hash = 3; - string pastel_id = 4; -} - -message EncodeMetaDataReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message EncodeRequest { - string path = 1; -} - -message EncodeReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message DecodeRequest { - bytes encoder_parameters = 1; - string path = 2; -} - -message DecodeReply { - string path = 1; -} \ No newline at end of file diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto index 9725f84a..d51de355 100644 --- a/proto/supernode/service.proto +++ b/proto/supernode/service.proto @@ -12,12 +12,79 @@ service SupernodeService { get: "/api/v1/status" }; } - + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { option (google.api.http) = { get: "/api/v1/services" }; } + + // Raw pprof endpoints - return standard pprof output directly + rpc GetRawPprof(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof" + }; + } + + rpc GetRawPprofHeap(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/heap" + }; + } + + rpc GetRawPprofGoroutine(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/goroutine" + }; + } + + rpc GetRawPprofAllocs(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/allocs" + }; + } + + rpc GetRawPprofBlock(RawPprofRequest) returns (RawPprofResponse) { + 
option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/block" + }; + } + + rpc GetRawPprofMutex(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/mutex" + }; + } + + rpc GetRawPprofThreadcreate(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/threadcreate" + }; + } + + rpc GetRawPprofProfile(RawPprofCpuRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/profile" + }; + } + + rpc GetRawPprofCmdline(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/cmdline" + }; + } + + rpc GetRawPprofSymbol(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/symbol" + }; + } + + rpc GetRawPprofTrace(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/trace" + }; + } } message ListServicesRequest {} @@ -32,3 +99,16 @@ message ServiceInfo { repeated string methods = 2; } +// Raw pprof request/response messages +message RawPprofRequest { + int32 debug = 1; // Debug level (0 for binary, >0 for text) +} + +message RawPprofCpuRequest { + int32 seconds = 1; // CPU profile duration in seconds (default 30) +} + +message RawPprofResponse { + bytes data = 1; // Raw pprof data exactly as returned by runtime/pprof +} + diff --git a/proto/supernode/status.proto b/proto/supernode/status.proto index 7cafe908..d944d614 100644 --- a/proto/supernode/status.proto +++ b/proto/supernode/status.proto @@ -48,7 +48,7 @@ message StatusResponse { repeated string task_ids = 2; int32 task_count = 3; } - + // Network information message Network { int32 peers_count = 1; // Number of connected peers in P2P network @@ -127,7 +127,6 @@ message StatusResponse { DatabaseStats database = 5; DiskStatus disk = 6; } - + P2PMetrics p2p_metrics = 9; } - diff --git 
a/sdk/action/client.go b/sdk/action/client.go index 596e80f9..296cba9c 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -23,6 +23,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/codec" keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) @@ -249,17 +250,14 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("stat file: %w", err) } - data, err := os.ReadFile(filePath) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) - } // Build layout metadata only (no symbols). Supernodes will create symbols. rq := codec.NewRaptorQCodec("") - layout, err := rq.CreateMetadata(ctx, filePath) + metaResp, err := rq.CreateMetadata(ctx, codec.CreateMetadataRequest{Path: filePath}) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) } + layout := metaResp.Layout // Derive `max` from chain params, then create signatures and index IDs paramsResp, err := c.lumeraClient.GetActionParams(ctx) @@ -277,30 +275,34 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // Pick a random initial counter in [1,100] rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) ic := uint32(rnd.Int64() + 1) // 1..100 - signatures, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) + // Create signatures from the layout struct + indexSignatureFormat, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) } - // Compute data hash (blake3) as base64 - dataHashB64, err := 
cascadekit.ComputeBlake3DataHashB64(data) + // Compute data hash (blake3) as base64 using a streaming file hash to avoid loading entire file + h, err := utils.ComputeHashOfFile(filePath) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("hash data: %w", err) } + dataHashB64 := base64.StdEncoding.EncodeToString(h) // Derive file name from path fileName := filepath.Base(filePath) // Build metadata proto - meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) // Fetch params (already fetched) to get denom and expiration duration denom := paramsResp.Params.BaseActionFee.Denom exp := paramsResp.Params.ExpirationDuration - // Compute data size in KB for fee - kb := int(fi.Size()) / 1024 - feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.Itoa(kb)) + // Compute data size in KB for fee, rounding up to avoid underpaying + // Keep consistent with supernode verification which uses ceil(bytes/1024) + sizeBytes := fi.Size() + kb := (sizeBytes + 1023) / 1024 // int64 division + feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.FormatInt(kb, 10)) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action fee: %w", err) } @@ -316,15 +318,11 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key. // Returns base64-encoded signature suitable for StartCascade. 
func (c *ClientImpl) GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) { - data, err := os.ReadFile(filePath) - if err != nil { - return "", fmt.Errorf("read file: %w", err) - } - hash, err := cascadekit.ComputeBlake3Hash(data) + h, err := utils.ComputeHashOfFile(filePath) if err != nil { return "", fmt.Errorf("blake3: %w", err) } - sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, hash) + sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, h) if err != nil { return "", fmt.Errorf("sign hash: %w", err) } diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 042c2273..3e21627c 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sort" + "time" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -14,7 +15,18 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + ristretto "github.com/dgraph-io/ristretto/v2" "github.com/golang/protobuf/proto" + "golang.org/x/sync/singleflight" +) + +const ( + // Cache tuning: tiny LFU with TTL to avoid stale long-term entries + cacheNumCounters = 1_000 + cacheMaxCost = 100 + cacheBufferItems = 64 + cacheItemCost = 1 + cacheTTL = time.Hour ) //go:generate mockery --name=Client --output=testutil/mocks --outpkg=mocks --filename=lumera_mock.go @@ -54,6 +66,11 @@ type ConfigParams struct { type Adapter struct { client lumeraclient.Client logger log.Logger + + // Lightweight caches to reduce repeated chain lookups when used as a validator + accountCache *ristretto.Cache[string, *authtypes.QueryAccountInfoResponse] + supernodeCache *ristretto.Cache[string, *sntypes.SuperNode] + sf singleflight.Group } // NewAdapter creates a new Adapter with dependencies explicitly injected @@ -77,31 +94,74 @@ func NewAdapter(ctx context.Context, config ConfigParams, 
logger log.Logger) (Cl logger.Info(ctx, "Lumera adapter created successfully") + // Initialize small, bounded caches return &Adapter{ - client: client, - logger: logger, + client: client, + logger: logger, + accountCache: newStringCache[*authtypes.QueryAccountInfoResponse](), + supernodeCache: newStringCache[*sntypes.SuperNode](), }, nil } +func newStringCache[T any]() *ristretto.Cache[string, T] { + c, _ := ristretto.NewCache(&ristretto.Config[string, T]{ + NumCounters: cacheNumCounters, + MaxCost: cacheMaxCost, + BufferItems: cacheBufferItems, + }) + return c +} + func (a *Adapter) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*sntypes.SuperNode, error) { - a.logger.Debug(ctx, "Getting supernode by address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if address == "" { + return nil, fmt.Errorf("address cannot be empty") + } + // Fast path: cache hit + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok && val != nil { + return val, nil + } + } + + // Deduplicate concurrent lookups for same address + res, err, _ := a.sf.Do("sn:"+address, func() (any, error) { + // Double-check cache inside singleflight + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok && val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting supernode by address", "address", address) + resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if err != nil { + a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) + return nil, fmt.Errorf("failed to get supernode: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for supernode", "address", address) + return nil, fmt.Errorf("received nil response for supernode %s", address) + } + if a.supernodeCache != nil { + a.supernodeCache.SetWithTTL(address, resp, cacheItemCost, cacheTTL) + } + return resp, nil + }) if err != nil 
{ - a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) - return nil, fmt.Errorf("failed to get supernode: %w", err) + return nil, err } - if resp == nil { - a.logger.Error(ctx, "Received nil response for supernode", "address", address) - return nil, fmt.Errorf("received nil response for supernode %s", address) + sn, _ := res.(*sntypes.SuperNode) + if sn == nil { + return nil, fmt.Errorf("supernode is nil") } - a.logger.Debug(ctx, "Successfully retrieved supernode", "address", address) - return resp, nil + return sn, nil } func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) { a.logger.Debug(ctx, "Getting supernode with latest address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + // Route through cached method to avoid duplicate chain calls + resp, err := a.GetSupernodeBySupernodeAddress(ctx, address) if err != nil { a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) return nil, fmt.Errorf("failed to get supernode: %w", err) @@ -147,19 +207,49 @@ func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address str } func (a *Adapter) AccountInfoByAddress(ctx context.Context, addr string) (*authtypes.QueryAccountInfoResponse, error) { - a.logger.Debug(ctx, "Getting account info by address", "address", addr) - resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) - if err != nil { - a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) - return nil, fmt.Errorf("failed to get account info: %w", err) + if addr == "" { + return nil, fmt.Errorf("address cannot be empty") } - if resp == nil { - a.logger.Error(ctx, "Received nil response for account info", "address", addr) - return nil, fmt.Errorf("received nil response for account info %s", addr) + // Fast path: cache hit + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != 
nil { + return val, nil + } } - a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) - return resp, nil + // Deduplicate concurrent fetches + res, err, _ := a.sf.Do("acct:"+addr, func() (any, error) { + // Double-check cache inside singleflight window + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting account info by address", "address", addr) + resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) + if err != nil { + a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) + return nil, fmt.Errorf("failed to get account info: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for account info", "address", addr) + return nil, fmt.Errorf("received nil response for account info %s", addr) + } + if a.accountCache != nil { + a.accountCache.SetWithTTL(addr, resp, cacheItemCost, cacheTTL) + } + a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) + return resp, nil + }) + if err != nil { + return nil, err + } + ai, _ := res.(*authtypes.QueryAccountInfoResponse) + if ai == nil { + return nil, fmt.Errorf("account info is nil") + } + return ai, nil } func (a *Adapter) GetAction(ctx context.Context, actionID string) (Action, error) { diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index 9712915c..3195b694 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -225,7 +225,9 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca bytesRead += int64(n) progress := float64(bytesRead) / float64(totalBytes) * 100 - a.logger.Debug(ctx, "Sent data chunk", "chunkIndex", chunkIndex, "chunkSize", n, "progress", fmt.Sprintf("%.1f%%", progress)) + // Print upload progress directly to stdout + fmt.Printf("Upload progress: task_id=%s action_id=%s chunk_index=%d 
chunk_size=%d progress=%.1f%% bytes=%d/%d\n", + in.TaskId, in.ActionID, chunkIndex, n, progress, bytesRead, totalBytes) chunkIndex++ } @@ -477,6 +479,10 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( chunkIndex++ a.logger.Debug(ctx, "received chunk", "chunk_index", chunkIndex, "chunk_size", len(data), "bytes_written", bytesWritten) + + // Print download progress directly to stdout (similar to upload progress) + fmt.Printf("Download progress: action_id=%s chunk_index=%d chunk_size=%d bytes=%d\n", + in.ActionID, chunkIndex, len(data), bytesWritten) } } diff --git a/sdk/docs/cascade-timeouts.md b/sdk/docs/cascade-timeouts.md index 716804bc..7568dd28 100644 --- a/sdk/docs/cascade-timeouts.md +++ b/sdk/docs/cascade-timeouts.md @@ -34,8 +34,8 @@ This document explains how timeouts and deadlines are applied across the SDK cas 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` - Validates file size; fetches healthy supernodes; registers with one. -4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` - - `context.WithTimeout(parent, 10s)` for health probe (create client + `HealthCheck`). +4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` (single-pass sanitize + load) + - `context.WithTimeout(parent, 10s)` per node: `HealthCheck` + `GetStatus` (peers, running_tasks) + balance. 5) Registration attempt: `sdk/task/cascade.go: attemptRegistration` - Client connect: uses task context (no deadline); gRPC injects a 30s default at connect if needed. @@ -136,7 +136,7 @@ This approach requires no request‑struct changes and preserves existing call s - `supernode/sdk/action/client.go` — entrypoints, no timeouts added. - `supernode/sdk/task/manager.go` — detaches from caller context; creates and runs tasks. - `supernode/sdk/task/timeouts.go` — `connectionTimeout` for health checks. - - `supernode/sdk/task/task.go` — discovery + health checks using `connectionTimeout`. 
+ - `supernode/sdk/task/task.go` — discovery with single-pass probe (`fetchSupernodesWithLoads`) using `connectionTimeout`. - `supernode/sdk/adapters/supernodeservice/timeouts.go` — upload/processing timeout constants. - `supernode/sdk/adapters/supernodeservice/adapter.go` — upload and progress stream handling (phase timers + events). - `supernode/sdk/net/factory.go` — client options tuned for streaming. @@ -170,7 +170,7 @@ This document describes how the SDK applies timeouts and deadlines during cascad 1) `sdk/action/client.go: ClientImpl.StartCascade(ctx, ...)` — forwards `ctx` to the Task Manager. 2) `sdk/task/manager.go: ManagerImpl.CreateCascadeTask(...)` — detaches from caller (`context.WithCancel(context.Background())`). 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` — validates file size, discovers healthy supernodes, attempts registration. -4) `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` — health probe with `connectionTimeout = 10s` per node. +4) `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` — single-pass probe with `connectionTimeout = 10s` per node (health, status, balance) and load snapshot. 5) `sdk/task/cascade.go: attemptRegistration` — creates client and calls `RegisterCascade` with task context. 6) `sdk/adapters/supernodeservice/adapter.go: CascadeSupernodeRegister` — applies phase timers: - Upload phase: send chunks and metadata; cancel if `cascadeUploadTimeout` elapses. 
diff --git a/sdk/net/impl.go b/sdk/net/impl.go index 77ac7de9..e597ccbb 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -3,6 +3,7 @@ package net import ( "context" "fmt" + "sync" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" @@ -29,13 +30,17 @@ type supernodeClient struct { // Verify interface compliance at compile time var _ SupernodeClient = (*supernodeClient)(nil) +// ensure ALTS protocols are registered once per process +var registerALTSOnce sync.Once + // NewSupernodeClient creates a new supernode client func NewSupernodeClient(ctx context.Context, logger log.Logger, keyring keyring.Keyring, factoryConfig FactoryConfig, targetSupernode lumera.Supernode, lumeraClient lumera.Client, clientOptions *client.ClientOptions, ) (SupernodeClient, error) { - // Register ALTS protocols, just like in the test - conn.RegisterALTSRecordProtocols() + // Register ALTS protocols once (process-wide). These are global and should not + // be unregistered per-connection to avoid impacting concurrent clients. 
+ registerALTSOnce.Do(func() { conn.RegisterALTSRecordProtocols() }) // Validate required parameters if logger == nil { @@ -154,10 +159,6 @@ func (c *supernodeClient) Close(ctx context.Context) error { if c.conn != nil { c.logger.Debug(ctx, "Closing connection to supernode") err := c.conn.Close() - - // Cleanup ALTS protocols when client is closed - conn.UnregisterALTSRecordProtocols() - return err } return nil diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index c13b94a1..a2cdcd3a 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -46,7 +46,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) + // Log available candidates; streaming will happen within registration + t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes fetched", event.EventData{event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes if err := t.registerWithSupernodes(ctx, supernodes); err != nil { @@ -72,34 +73,57 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum TaskId: t.TaskID, } + // Strict XOR-first qualification and attempts + fileSize := getFileSizeBytes(t.filePath) + var minRam uint64 + if fileSize > 0 { + minRam = uint64(fileSize) * uploadRAMMultiplier + } + ordered := t.orderByXORDistance(ctx, supernodes) + var lastErr error - for idx, sn := range supernodes { - // 1 + attempted := 0 + for i, sn := range ordered { + iteration := i + 1 + t.LogEvent(ctx, event.SDKRegistrationAttempt, "attempting registration with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) - if err := t.attemptRegistration(ctx, idx, sn, clientFactory, req); err != nil { - // + + // Re-check serving status just-in-time to avoid calling a node that became down/underpeered + // Ensure node 
qualifies before attempt + if !t.nodeQualifies(ctx, sn, minStorageThresholdBytes, minRam) { + continue + } + + attempted++ + if err := t.attemptRegistration(ctx, iteration-1, sn, clientFactory, req); err != nil { t.LogEvent(ctx, event.SDKRegistrationFailure, "registration with supernode failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, event.KeyError: err.Error(), }) lastErr = err continue } + t.LogEvent(ctx, event.SDKRegistrationSuccessful, "successfully registered with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) return nil // success } - - return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + if attempted == 0 { + return fmt.Errorf("no eligible supernodes to register") + } + if lastErr != nil { + return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + } + return fmt.Errorf("failed to upload to all supernodes") } func (t *CascadeTask) attemptRegistration(ctx context.Context, _ int, sn lumera.Supernode, factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeRegisterRequest) error { diff --git a/sdk/task/download.go b/sdk/task/download.go index 2c727ae9..d9b2d800 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -36,14 +36,15 @@ func NewCascadeDownloadTask(base BaseTask, actionId string, outputPath string, s func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskStarted, "Running cascade download task", nil) - // 1 – fetch super-nodes + // 1 – fetch super-nodes (plain) supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "super-nodes unavailable", event.EventData{event.KeyError: err.Error()}) t.LogEvent(ctx, event.SDKTaskFailed, "task failed", 
event.EventData{event.KeyError: err.Error()}) return err } - t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) + // Log available candidates; streaming will happen within download phase + t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes fetched", event.EventData{event.KeyCount: len(supernodes)}) // 2 – download from super-nodes if err := t.downloadFromSupernodes(ctx, supernodes); err != nil { @@ -76,10 +77,13 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern } } - // Try supernodes sequentially, one by one (now sorted) + // Strict XOR-first qualification and attempts (downloads: storage-only threshold) + ordered := t.orderByXORDistance(ctx, supernodes) + var lastErr error - for idx, sn := range supernodes { - iteration := idx + 1 + attempted := 0 + for i, sn := range ordered { + iteration := i + 1 // Log download attempt t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ @@ -88,8 +92,14 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, }) + // Ensure node qualifies before attempt + if !t.nodeQualifies(ctx, sn, minStorageThresholdBytes, 0) { + continue + } + + attempted++ if err := t.attemptDownload(ctx, sn, clientFactory, req); err != nil { - // Log failure and continue to next supernode + // Log failure and continue with the rest t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, @@ -116,7 +126,6 @@ func (t *CascadeDownloadTask) attemptDownload( factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeDownloadRequest, ) error { - ctx, cancel := context.WithTimeout(parent, downloadTimeout) defer cancel() @@ -140,114 +149,3 @@ func (t *CascadeDownloadTask) attemptDownload( return nil } - -// downloadResult holds the result 
of a successful download attempt -type downloadResult struct { - SupernodeAddress string - SupernodeEndpoint string - Iteration int -} - -// attemptConcurrentDownload tries to download from multiple supernodes concurrently -// Returns the first successful result or all errors if all attempts fail -func (t *CascadeDownloadTask) attemptConcurrentDownload( - ctx context.Context, - batch lumera.Supernodes, - factory *net.ClientFactory, - req *supernodeservice.CascadeSupernodeDownloadRequest, - baseIteration int, -) (*downloadResult, []error) { - // Remove existing file if it exists to allow overwrite (do this once before concurrent attempts) - if _, err := os.Stat(req.OutputPath); err == nil { - if removeErr := os.Remove(req.OutputPath); removeErr != nil { - return nil, []error{fmt.Errorf("failed to remove existing file %s: %w", req.OutputPath, removeErr)} - } - } - - // Create a cancellable context for this batch - batchCtx, cancelBatch := context.WithCancel(ctx) - defer cancelBatch() - - // Channels for results - type attemptResult struct { - success *downloadResult - err error - idx int - } - resultCh := make(chan attemptResult, len(batch)) - - // Start concurrent download attempts - for idx, sn := range batch { - iteration := baseIteration + idx + 1 - - // Log download attempt - t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: iteration, - }) - - go func(sn lumera.Supernode, idx int, iter int) { - // Create a copy of the request for this goroutine - reqCopy := &supernodeservice.CascadeSupernodeDownloadRequest{ - ActionID: req.ActionID, - TaskID: req.TaskID, - OutputPath: req.OutputPath, - Signature: req.Signature, - } - - err := t.attemptDownload(batchCtx, sn, factory, reqCopy) - if err != nil { - resultCh <- attemptResult{ - err: err, - idx: idx, - } - return - } - - resultCh <- attemptResult{ - success: 
&downloadResult{ - SupernodeAddress: sn.CosmosAddress, - SupernodeEndpoint: sn.GrpcEndpoint, - Iteration: iter, - }, - idx: idx, - } - }(sn, idx, iteration) - } - - // Collect results - var errors []error - for i := range len(batch) { - select { - case result := <-resultCh: - if result.success != nil { - // Success! Cancel other attempts and return - cancelBatch() - // Drain remaining results to avoid goroutine leaks - go func() { - for j := i + 1; j < len(batch); j++ { - <-resultCh - } - }() - return result.success, nil - } - - // Log failure - sn := batch[result.idx] - t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: baseIteration + result.idx + 1, - event.KeyError: result.err.Error(), - }) - errors = append(errors, result.err) - - case <-ctx.Done(): - return nil, []error{ctx.Err()} - } - } - - // All attempts in this batch failed - return nil, errors -} diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index 2ea8bcaa..2e9ee4c3 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -4,10 +4,11 @@ import ( "context" "encoding/base64" "fmt" + "math/big" "os" - "path/filepath" - "strings" + "sort" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" ) @@ -47,7 +48,7 @@ func (m *ManagerImpl) validateAction(ctx context.Context, actionID string) (lume } // validateSignature verifies the authenticity of a signature against an action's data hash. -// + // This function performs the following steps: // 1. Decodes the CASCADE metadata from the provided Lumera action // 2. Extracts the base64-encoded data hash from the metadata @@ -100,7 +101,7 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio return nil } -// (Removed) Peers connectivity preflight is now enforced during discovery in isServing. 
+// func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID string) (lumera.Action, error) { action, err := m.lumeraClient.GetAction(ctx, actionID) @@ -121,18 +122,46 @@ func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID strin return action, nil } -// Helper function to ensure output path has the correct filename -func ensureOutputPathWithFilename(outputPath, filename string) string { - // If outputPath is empty, just return the filename - if outputPath == "" { - return filename +func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 || seed == "" { + return sns } - - // Check if the path already ends with the filename - if strings.HasSuffix(outputPath, filename) { - return outputPath + // Precompute seed hash (blake3) + seedHash, err := utils.Blake3Hash([]byte(seed)) + if err != nil { + return sns } - // Otherwise, append the filename to the path - return filepath.Join(outputPath, filename) + type nodeDist struct { + sn lumera.Supernode + distance *big.Int + } + nd := make([]nodeDist, 0, len(sns)) + for _, sn := range sns { + id := sn.CosmosAddress + if id == "" { + id = sn.GrpcEndpoint + } + nHash, err := utils.Blake3Hash([]byte(id)) + if err != nil { + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetInt64(0)}) + continue + } + // XOR distance across min length + l := len(seedHash) + if len(nHash) < l { + l = len(nHash) + } + xor := make([]byte, l) + for i := 0; i < l; i++ { + xor[i] = seedHash[i] ^ nHash[i] + } + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetBytes(xor)}) + } + sort.Slice(nd, func(i, j int) bool { return nd[i].distance.Cmp(nd[j].distance) < 0 }) + out := make(lumera.Supernodes, len(nd)) + for i := range nd { + out[i] = nd[i].sn + } + return out } diff --git a/sdk/task/task.go b/sdk/task/task.go index bb402975..eea513fc 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -4,11 +4,9 @@ import ( "context" "errors" 
"fmt" - "sync" + "os" sdkmath "cosmossdk.io/math" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" @@ -27,6 +25,14 @@ const ( TaskTypeCascade TaskType = "CASCADE" ) +// Package-level thresholds and tuning +const ( + // Minimum available storage required on any volume (bytes) + minStorageThresholdBytes uint64 = 50 * 1024 * 1024 * 1024 // 50 GB + // Upload requires free RAM to be at least 8x the file size + uploadRAMMultiplier uint64 = 8 +) + // EventCallback is a function that processes events from tasks type EventCallback func(ctx context.Context, e event.Event) @@ -82,78 +88,118 @@ func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Su if err != nil { return nil, fmt.Errorf("fetch supernodes: %w", err) } - if len(sns) == 0 { return nil, errors.New("no supernodes found") } + return sns, nil +} - // Keep only SERVING nodes (done in parallel – keeps latency flat) - healthy := make(lumera.Supernodes, 0, len(sns)) - eg, ctx := errgroup.WithContext(ctx) - mu := sync.Mutex{} - - for _, sn := range sns { - sn := sn - eg.Go(func() error { - if t.isServing(ctx, sn) { - mu.Lock() - healthy = append(healthy, sn) - mu.Unlock() - } - return nil - }) +// orderByXORDistance ranks supernodes by XOR distance to the action's data hash. +// If decoding metadata fails, falls back to using the action ID as the seed. 
+func (t *BaseTask) orderByXORDistance(ctx context.Context, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) <= 1 { + return sns } - if err := eg.Wait(); err != nil { - return nil, fmt.Errorf("health-check goroutines: %w", err) + // Try to decode the action metadata to get the Cascade data hash as seed + seed := t.ActionID + if t.client != nil && (t.Action.Metadata != nil || t.Action.ActionType != "") { + if meta, err := t.client.DecodeCascadeMetadata(ctx, t.Action); err == nil && meta.DataHash != "" { + seed = meta.DataHash + } } + return orderSupernodesByDeterministicDistance(seed, sns) +} - if len(healthy) == 0 { - return nil, errors.New("no healthy supernodes found") - } +// filterByResourceThresholds removes supernodes that do not satisfy minimum +// available storage and free RAM thresholds. +// - minStorageBytes: minimum available storage on any volume (bytes) +// - minFreeRamBytes: minimum free RAM (bytes). If 0, RAM check is skipped. - return healthy, nil +// helper: get file size (bytes). returns 0 on error +func getFileSizeBytes(p string) int64 { + fi, err := os.Stat(p) + if err != nil { + return 0 + } + return fi.Size() } -// isServing pings the super-node once with a short timeout. -func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { +// nodeQualifies performs balance, health, and resource checks for a supernode. 
+func (t *BaseTask) nodeQualifies(parent context.Context, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + // 1) Balance check (require at least 1 LUME) + if !t.balanceOK(parent, sn) { + return false + } + + // 2) Health + resources via a single client session ctx, cancel := context.WithTimeout(parent, connectionTimeout) defer cancel() - client, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, net.FactoryConfig{ LocalCosmosAddress: t.config.Account.LocalCosmosAddress, PeerType: t.config.Account.PeerType, }).CreateClient(ctx, sn) if err != nil { - logtrace.Debug(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"}) return false } defer client.Close(ctx) - // First check gRPC health - resp, err := client.HealthCheck(ctx) - if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { + // Health check + h, err := client.HealthCheck(ctx) + if err != nil || h == nil || h.Status != grpc_health_v1.HealthCheckResponse_SERVING { return false } - // Then check P2P peers count via status - status, err := client.GetSupernodeStatus(ctx) - if err != nil { - return false - } - if status.Network.PeersCount <= 1 { - return false - } + // Resource thresholds + return t.resourcesOK(ctx, client, sn, minStorageBytes, minFreeRamBytes) +} - denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" +func (t *BaseTask) balanceOK(parent context.Context, sn lumera.Supernode) bool { + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + defer cancel() + min := sdkmath.NewInt(1_000_000) // 1 LUME in ulume + denom := txmod.DefaultFeeDenom bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) if err != nil || bal == nil || bal.Balance == nil { return false } - // Require at least 1 LUME = 10^6 micro (ulume) - min := sdkmath.NewInt(1_000_000) if bal.Balance.Amount.LT(min) { return false } + return true +} +func (t *BaseTask) resourcesOK(ctx context.Context, client 
net.SupernodeClient, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + // In tests, skip resource thresholds (keep balance + health via nodeQualifies) + if os.Getenv("INTEGRATION_TEST") == "true" { + return true + } + status, err := client.GetSupernodeStatus(ctx) + if err != nil || status == nil || status.Resources == nil { + return false + } + // Storage: any volume must satisfy available >= minStorageBytes + if minStorageBytes > 0 { + ok := false + for _, vol := range status.Resources.StorageVolumes { + if vol != nil && vol.AvailableBytes >= minStorageBytes { + ok = true + break + } + } + if !ok { + return false + } + } + // RAM: available_gb must be >= required GiB + if minFreeRamBytes > 0 { + mem := status.Resources.Memory + if mem == nil { + return false + } + requiredGiB := float64(minFreeRamBytes) / (1024.0 * 1024.0 * 1024.0) + if mem.AvailableGb < requiredGiB { + return false + } + } return true } diff --git a/sn-manager/cmd/check.go b/sn-manager/cmd/check.go index df20b2a5..4910eb06 100644 --- a/sn-manager/cmd/check.go +++ b/sn-manager/cmd/check.go @@ -1,14 +1,14 @@ package cmd import ( - "fmt" - "strings" + "fmt" + "strings" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" - "github.com/spf13/cobra" + "github.com/LumeraProtocol/supernode/v2/pkg/github" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" + "github.com/spf13/cobra" ) var checkCmd = &cobra.Command{ @@ -32,8 +32,8 @@ func runCheck(cmd *cobra.Command, args []string) error { fmt.Println("Checking for updates...") - // Create GitHub client - client := github.NewClient(config.GitHubRepo) 
+ // Create GitHub client + client := github.NewClient(config.GitHubRepo) // Get latest stable release release, err := client.GetLatestStableRelease() @@ -41,26 +41,26 @@ func runCheck(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to check for stable updates: %w", err) } - fmt.Printf("\nLatest release: %s\n", release.TagName) - fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) - // Report manager version and if it would update under the same policy - mv := strings.TrimSpace(appVersion) - if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { - managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 - fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) - } else { - fmt.Printf("Manager version: %s\n", appVersion) - } + fmt.Printf("\nLatest release: %s\n", release.TagName) + fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) + // Report manager version and if it would update under the same policy + mv := strings.TrimSpace(appVersion) + if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { + managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 + fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) + } else { + fmt.Printf("Manager version: %s\n", appVersion) + } - // Compare versions - cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) + // Compare versions + cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) if cmp < 0 { // Use the same logic as auto-updater to determine update eligibility - managerHome := config.GetManagerHome() - autoUpdater := updater.New(managerHome, cfg, appVersion) - wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) - + managerHome := config.GetManagerHome() + autoUpdater := updater.New(managerHome, cfg, appVersion, nil) + wouldAutoUpdate := 
autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) + if wouldAutoUpdate { fmt.Printf("\n✓ Update available: %s → %s\n", cfg.Updates.CurrentVersion, release.TagName) fmt.Printf("Published: %s\n", release.PublishedAt.Format("2006-01-02 15:04:05")) diff --git a/sn-manager/cmd/get.go b/sn-manager/cmd/get.go index eb8f0fac..7244c10f 100644 --- a/sn-manager/cmd/get.go +++ b/sn-manager/cmd/get.go @@ -6,8 +6,8 @@ import ( "os" "path/filepath" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/init.go b/sn-manager/cmd/init.go index 383d70ad..2eb2639c 100644 --- a/sn-manager/cmd/init.go +++ b/sn-manager/cmd/init.go @@ -8,8 +8,8 @@ import ( "path/filepath" "github.com/AlecAivazis/survey/v2" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/ls-remote.go b/sn-manager/cmd/ls-remote.go index 65619fd1..0d7bdff6 100644 --- a/sn-manager/cmd/ls-remote.go +++ b/sn-manager/cmd/ls-remote.go @@ -3,8 +3,8 @@ package cmd import ( "fmt" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/spf13/cobra" ) @@ -15,37 +15,37 @@ var lsRemoteCmd = &cobra.Command{ } func runLsRemote(cmd *cobra.Command, args []string) error { - client := github.NewClient(config.GitHubRepo) - - 
releases, err := client.ListReleases() - if err != nil { - return fmt.Errorf("failed to list releases: %w", err) - } - - // Filter to stable (non-draft, non-prerelease) - var stable []*github.Release - for _, r := range releases { - if !r.Draft && !r.Prerelease { - stable = append(stable, r) - } - } - - if len(stable) == 0 { - fmt.Println("No releases found") - return nil - } - - fmt.Println("Available versions:") - for i, release := range stable { - if i == 0 { - fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } else { - fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } - if i >= 9 { - break - } - } + client := github.NewClient(config.GitHubRepo) + + releases, err := client.ListReleases() + if err != nil { + return fmt.Errorf("failed to list releases: %w", err) + } + + // Filter to stable (non-draft, non-prerelease) + var stable []*github.Release + for _, r := range releases { + if !r.Draft && !r.Prerelease { + stable = append(stable, r) + } + } + + if len(stable) == 0 { + fmt.Println("No releases found") + return nil + } + + fmt.Println("Available versions:") + for i, release := range stable { + if i == 0 { + fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } else { + fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } + if i >= 9 { + break + } + } return nil } diff --git a/sn-manager/cmd/start.go b/sn-manager/cmd/start.go index de03c6dd..6deb1583 100644 --- a/sn-manager/cmd/start.go +++ b/sn-manager/cmd/start.go @@ -11,8 +11,8 @@ import ( "strings" "syscall" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/manager" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" 
"github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" @@ -121,12 +121,27 @@ func runStart(cmd *cobra.Command, args []string) error { } } + // orchestrator to gracefully stop SuperNode and exit manager with code 3 + gracefulManagerRestart := func() { + // Write stop marker so monitor won't auto-restart SuperNode + stopMarkerPath := filepath.Join(home, stopMarkerFile) + _ = os.WriteFile(stopMarkerPath, []byte("manager-update"), 0644) + + // Attempt graceful stop of SuperNode if running + if mgr.IsRunning() { + if err := mgr.Stop(); err != nil { + log.Printf("Failed to stop supernode: %v", err) + } + } + os.Exit(3) + } + // Mandatory version sync on startup: ensure both sn-manager and SuperNode // are at the latest stable release. This bypasses regular updater checks // (gateway idleness, same-major policy) to guarantee a consistent baseline. - // Runs once before monitoring begins. + // Runs once before monitoring begins. If manager updated, restart now. func() { - u := updater.New(home, cfg, appVersion) + u := updater.New(home, cfg, appVersion, gracefulManagerRestart) // Do not block startup on failures; best-effort sync defer func() { recover() }() u.ForceSyncToLatest(context.Background()) @@ -135,7 +150,7 @@ func runStart(cmd *cobra.Command, args []string) error { // Start auto-updater if enabled var autoUpdater *updater.AutoUpdater if cfg.Updates.AutoUpgrade { - autoUpdater = updater.New(home, cfg, appVersion) + autoUpdater = updater.New(home, cfg, appVersion, gracefulManagerRestart) autoUpdater.Start(ctx) } @@ -171,7 +186,15 @@ func runStart(cmd *cobra.Command, args []string) error { return nil case err := <-monitorDone: - // Monitor exited unexpectedly + // Monitor exited; ensure SuperNode is stopped as manager exits + if autoUpdater != nil { + autoUpdater.Stop() + } + if mgr.IsRunning() { + if stopErr := mgr.Stop(); stopErr != nil { + log.Printf("Failed to stop supernode: %v", stopErr) + } + } if err != nil { return fmt.Errorf("monitor error: %w", 
err) } diff --git a/sn-manager/go.mod b/sn-manager/go.mod index 1beee097..8d29e8e6 100644 --- a/sn-manager/go.mod +++ b/sn-manager/go.mod @@ -3,15 +3,14 @@ module github.com/LumeraProtocol/supernode/v2/sn-manager go 1.24.1 require ( - github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 - github.com/spf13/cobra v1.8.1 - gopkg.in/yaml.v3 v3.0.1 + github.com/AlecAivazis/survey/v2 v2.3.7 + github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 + github.com/spf13/cobra v1.8.1 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/golang/protobuf v1.5.4 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -24,7 +23,6 @@ require ( golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/grpc v1.71.0 // indirect diff --git a/sn-manager/go.sum b/sn-manager/go.sum index 51f96134..6413ef48 100644 --- a/sn-manager/go.sum +++ b/sn-manager/go.sum @@ -1,14 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -16,37 +9,28 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x 
v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -59,8 +43,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -85,44 +69,22 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -142,40 +104,20 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.71.0/go.mod 
h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/sn-manager/internal/config/config.go b/sn-manager/internal/config/config.go index 050d36b4..f41a7f89 100644 --- a/sn-manager/internal/config/config.go +++ b/sn-manager/internal/config/config.go @@ -90,7 +90,3 @@ func Save(cfg *Config, path string) error { return nil } - -// Validate checks if the configuration is valid -// Validate is kept for compatibility; no-op since interval was removed. 
-func (c *Config) Validate() error { return nil } diff --git a/sn-manager/internal/manager/manager.go b/sn-manager/internal/manager/manager.go index fd176121..06dacdb4 100644 --- a/sn-manager/internal/manager/manager.go +++ b/sn-manager/internal/manager/manager.go @@ -33,11 +33,6 @@ func New(homeDir string) (*Manager, error) { return nil, fmt.Errorf("failed to load config: %w", err) } - // Validate configuration - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &Manager{ config: cfg, homeDir: homeDir, @@ -175,9 +170,9 @@ func (m *Manager) cleanup() { const ( DefaultShutdownTimeout = 30 * time.Second ProcessCheckInterval = 5 * time.Second - CrashBackoffDelay = 2 * time.Second - StopMarkerFile = ".stop_requested" - RestartMarkerFile = ".needs_restart" + CrashBackoffDelay = 2 * time.Second + StopMarkerFile = ".stop_requested" + RestartMarkerFile = ".needs_restart" ) // Monitor continuously supervises the SuperNode process @@ -190,7 +185,7 @@ func (m *Manager) Monitor(ctx context.Context) error { // Channel to monitor process exits processExitCh := make(chan error, 1) - + // Function to arm the process wait goroutine armProcessWait := func() { processExitCh = make(chan error, 1) @@ -262,7 +257,7 @@ func (m *Manager) Monitor(ctx context.Context) error { case <-ticker.C: // Periodic check for various conditions - + // 1. 
Check if stop marker was removed and we should start if !m.IsRunning() { if _, err := os.Stat(stopMarkerPath); os.IsNotExist(err) { @@ -281,16 +276,16 @@ func (m *Manager) Monitor(ctx context.Context) error { if _, err := os.Stat(restartMarkerPath); err == nil { if m.IsRunning() { log.Println("Binary update detected, restarting SuperNode...") - + // Remove the restart marker if err := os.Remove(restartMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove restart marker: %v", err) } - + // Create temporary stop marker for clean restart tmpStopMarker := []byte("update") os.WriteFile(stopMarkerPath, tmpStopMarker, 0644) - + // Stop current process if err := m.Stop(); err != nil { log.Printf("Failed to stop for update: %v", err) @@ -299,15 +294,15 @@ func (m *Manager) Monitor(ctx context.Context) error { } continue } - + // Brief pause time.Sleep(CrashBackoffDelay) - + // Remove temporary stop marker if err := os.Remove(stopMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove stop marker: %v", err) } - + // Start with new binary log.Println("Starting with updated binary...") if err := m.Start(ctx); err != nil { @@ -325,7 +320,7 @@ func (m *Manager) Monitor(ctx context.Context) error { m.mu.RLock() proc := m.process m.mu.RUnlock() - + if proc != nil { if err := proc.Signal(syscall.Signal(0)); err != nil { // Process is dead but not cleaned up @@ -344,4 +339,3 @@ func (m *Manager) Monitor(ctx context.Context) error { func (m *Manager) GetConfig() *config.Config { return m.config } - diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index 5bf650c1..2e6f9d56 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -12,11 +12,11 @@ import ( "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" 
- "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" "google.golang.org/protobuf/encoding/protojson" ) @@ -28,7 +28,7 @@ const ( updateCheckInterval = 10 * time.Minute // forceUpdateAfter is the age threshold after a release is published // beyond which updates are applied regardless of normal gates (idle, policy) - forceUpdateAfter = 30 * time.Minute + forceUpdateAfter = 10 * time.Minute ) type AutoUpdater struct { @@ -43,22 +43,25 @@ type AutoUpdater struct { // Gateway error backoff state gwErrCount int gwErrWindowStart time.Time + // Optional hook to handle manager update (restart) orchestration + onManagerUpdate func() } // Use protobuf JSON decoding for gateway responses (int64s encoded as strings) -func New(homeDir string, cfg *config.Config, managerVersion string) *AutoUpdater { +func New(homeDir string, cfg *config.Config, managerVersion string, onManagerUpdate func()) *AutoUpdater { // Use the correct gateway endpoint with imported constants gatewayURL := fmt.Sprintf("http://localhost:%d/api/v1/status", gateway.DefaultGatewayPort) return &AutoUpdater{ - config: cfg, - homeDir: homeDir, - githubClient: github.NewClient(config.GitHubRepo), - versionMgr: version.NewManager(homeDir), - gatewayURL: gatewayURL, - stopCh: make(chan struct{}), - managerVersion: managerVersion, + config: cfg, + homeDir: homeDir, + githubClient: github.NewClient(config.GitHubRepo), + versionMgr: version.NewManager(homeDir), + gatewayURL: gatewayURL, + stopCh: make(chan struct{}), + managerVersion: managerVersion, + onManagerUpdate: onManagerUpdate, } } @@ -133,9 +136,6 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { return false } -// isGatewayIdle returns (idle, 
isError). When isError is true, -// the gateway could not be reliably checked (network/error/invalid). -// When isError is false and idle is false, the gateway is busy. func (u *AutoUpdater) isGatewayIdle() (bool, bool) { client := &http.Client{Timeout: gatewayTimeout} @@ -163,16 +163,16 @@ func (u *AutoUpdater) isGatewayIdle() (bool, bool) { return false, true } - totalTasks := 0 - for _, service := range status.RunningTasks { - totalTasks += int(service.TaskCount) + // Idle when there are no running tasks across all services + if len(status.GetRunningTasks()) == 0 { + return true, false } - - if totalTasks > 0 { - log.Printf("Gateway busy: %d running tasks", totalTasks) - return false, false + for _, st := range status.GetRunningTasks() { + if st.GetTaskCount() > 0 || len(st.GetTaskIds()) > 0 { + log.Printf("Gateway busy: service=%s tasks=%d", st.GetServiceName(), st.GetTaskCount()) + return false, false + } } - return true, false } @@ -353,10 +353,12 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { // If manager updated, restart service after completing all work if managerUpdated { log.Printf("Self-update applied, restarting service...") - go func() { - time.Sleep(500 * time.Millisecond) + if u.onManagerUpdate != nil { + u.onManagerUpdate() + } else { + // Fallback: immediate process restart signal os.Exit(3) - }() + } } } diff --git a/supernode/adaptors/lumera.go b/supernode/adaptors/lumera.go new file mode 100644 index 00000000..958e7701 --- /dev/null +++ b/supernode/adaptors/lumera.go @@ -0,0 +1,47 @@ +package adaptors + +import ( + "context" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" +) + +type LumeraClient interface { + GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) + GetTopSupernodes(ctx context.Context, 
blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) + Verify(ctx context.Context, address string, msg []byte, sig []byte) error + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) + FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) +} + +type lumeraImpl struct{ c lumera.Client } + +func NewLumeraClient(c lumera.Client) LumeraClient { return &lumeraImpl{c: c} } + +func (l *lumeraImpl) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { + return l.c.Action().GetAction(ctx, actionID) +} + +func (l *lumeraImpl) GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { + return l.c.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) +} + +func (l *lumeraImpl) Verify(ctx context.Context, address string, msg []byte, sig []byte) error { + return l.c.Auth().Verify(ctx, address, msg, sig) +} + +func (l *lumeraImpl) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { + return l.c.Action().GetActionFee(ctx, dataSizeKB) +} + +func (l *lumeraImpl) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { + return l.c.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) +} + +func (l *lumeraImpl) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { + return l.c.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) +} diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/adaptors/p2p.go similarity index 58% rename from supernode/services/cascade/adaptors/p2p.go rename to supernode/adaptors/p2p.go index f0c47ee5..31184fd7 100644 --- 
a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/adaptors/p2p.go @@ -15,34 +15,24 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" - "github.com/pkg/errors" ) const ( - loadSymbolsBatchSize = 3000 - // Minimum first-pass coverage to store before returning from Register (percent) - storeSymbolsPercent = 18 - + loadSymbolsBatchSize = 100 + storeSymbolsPercent = 18 storeBatchContextTimeout = 3 * time.Minute + P2PDataRaptorQSymbol = 1 ) -// P2PService defines the interface for storing data in the P2P layer. -// -//go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go type P2PService interface { - // StoreArtefacts stores ID files and RaptorQ symbols. - // Metrics are recorded via internal metrics helpers; no metrics are returned. StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error } -// p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { p2p p2p.Client rqStore rqstore.Store } -// NewP2PService returns a concrete implementation of P2PService. 
func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { return &p2pImpl{p2p: client, rqStore: store} } @@ -56,132 +46,84 @@ type StoreArtefactsRequest struct { func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) - start := time.Now() firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) if err != nil { - return errors.Wrap(err, "error storing artefacts") + return fmt.Errorf("error storing artefacts: %w", err) } - dur := time.Since(start).Milliseconds() - // After first-pass, log how many symbols remain on disk remaining := 0 if req.SymbolsDir != "" { if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { remaining = len(keys) } } - logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) + logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": time.Since(start).Milliseconds()}) if remaining == 0 { logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) } - // Metrics collection removed; logs retained return nil } -// storeCascadeSymbols loads symbols from `symbolsDir`, optionally downsamples, -// streams them in fixed-size batches to the P2P layer, and tracks: -// - an item-weighted aggregate success rate across all batches -// - the total number of symbols processed (item count) -// - the total number of node 
requests attempted across batches -// -// Returns (aggRate, totalSymbols, totalRequests, err). func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { - /* record directory in DB */ if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { return 0, 0, fmt.Errorf("store symbol dir: %w", err) } - - /* gather every symbol path under symbolsDir ------------------------- */ keys, err := walkSymbolTree(symbolsDir) if err != nil { return 0, 0, err } - totalAvailable := len(keys) targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "store: target coverage", logtrace.Fields{ - "total_symbols": totalAvailable, - "target_percent": storeSymbolsPercent, - "target_count": targetCount, - }) - - /* down-sample if we exceed the “big directory” threshold ------------- */ + logtrace.Info(ctx, "store: target coverage", logtrace.Fields{"total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount}) if len(keys) > loadSymbolsBatchSize { want := targetCount if want < len(keys) { rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) keys = keys[:want] } - sort.Strings(keys) // deterministic order inside the sample + sort.Strings(keys) } logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - - totalSymbols := 0 // symbols stored + totalSymbols := 0 firstBatchProcessed := false - for start := 0; start < len(keys); { end := 
min(start+loadSymbolsBatchSize, len(keys)) batch := keys[start:end] - if !firstBatchProcessed && len(metadataFiles) > 0 { - // First "batch" has to include metadata + as many symbols as fit under batch size. - // If metadataFiles >= batch size, we send metadata in this batch and symbols start next batch. roomForSymbols := loadSymbolsBatchSize - len(metadataFiles) if roomForSymbols < 0 { roomForSymbols = 0 } if roomForSymbols < len(batch) { - // trim the first symbol chunk to leave space for metadata batch = batch[:roomForSymbols] end = start + roomForSymbols } - - // Load just this symbol chunk symBytes, err := utils.LoadSymbols(symbolsDir, batch) if err != nil { return 0, 0, fmt.Errorf("load symbols: %w", err) } - - // Build combined payload: metadata first, then symbols payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)) payload = append(payload, metadataFiles...) payload = append(payload, symBytes...) - - // Send as the same data type you use for symbols logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) + err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID) cancel() if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) - totalSymbols += len(symBytes) - // No per-RPC metrics propagated from p2p - - // Delete only the symbols we uploaded if len(batch) > 0 { if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } - // Log remaining symbols in directory after deletion - if rem, werr := walkSymbolTree(symbolsDir); 
werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "store: remaining after first batch", logtrace.Fields{"taskID": taskID, "left": left}) - } else { - logtrace.Info(ctx, "store: dir empty after first batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } - } - firstBatchProcessed = true } else { count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) @@ -190,43 +132,23 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } totalSymbols += count } - start = end } - - // Coverage uses symbols only - achievedPct := 0.0 - if totalAvailable > 0 { - achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 - } - logtrace.Info(ctx, "store: coverage", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) - if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } - // Final remaining count after first pass flagged - if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "store: remaining after first-pass", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) - } else { - logtrace.Info(ctx, "store: directory empty after first-pass", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } - } - return totalSymbols, totalAvailable, nil - } func walkSymbolTree(root string) ([]string, error) { var keys []string err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { if err != nil { - return err // propagate I/O errors + return err } if d.IsDir() { - return nil // skip directory nodes + return nil } - // ignore layout json if present if strings.EqualFold(filepath.Ext(d.Name()), ".json") { return nil } @@ -234,7 +156,7 @@ func walkSymbolTree(root string) ([]string, error) { if err != nil { return err } - keys = append(keys, rel) // store as "block_0/filename" + keys = 
append(keys, rel) return nil }) if err != nil { @@ -243,35 +165,28 @@ func walkSymbolTree(root string) ([]string, error) { return keys, nil } -// storeSymbolsInP2P loads a batch of symbols and stores them via P2P. -// Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) - symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { return 0, fmt.Errorf("load symbols: %w", err) } - symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) defer cancel() - logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) - if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { + if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } - // After deletion, log remaining count in directory - left := -1 - if rem, werr := walkSymbolTree(root); werr == nil { - left = len(rem) - } - logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left}) - - // No per-RPC metrics propagated from p2p return len(symbols), nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/supernode/adaptors/rq.go b/supernode/adaptors/rq.go new file mode 100644 index 00000000..b8efa1dd --- /dev/null +++ b/supernode/adaptors/rq.go @@ -0,0 +1,54 @@ +package adaptors + +import ( + "context" + "os" + + 
"github.com/LumeraProtocol/supernode/v2/pkg/codec" +) + +// CodecService wraps codec operations used by cascade +type CodecService interface { + EncodeInput(ctx context.Context, actionID string, filePath string) (EncodeResult, error) + Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) +} + +type EncodeResult struct { + SymbolsDir string + Layout codec.Layout +} + +type DecodeRequest struct { + ActionID string + Symbols map[string][]byte + Layout codec.Layout +} + +type DecodeResult struct { + FilePath string + DecodeTmpDir string +} + +type codecImpl struct{ codec codec.Codec } + +func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } + +func (c *codecImpl) EncodeInput(ctx context.Context, actionID, filePath string) (EncodeResult, error) { + var size int + if fi, err := os.Stat(filePath); err == nil { + size = int(fi.Size()) + } + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: filePath, DataSize: size}) + if err != nil { + return EncodeResult{}, err + } + return EncodeResult{SymbolsDir: res.SymbolsDir, Layout: res.Layout}, nil +} + +func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) { + res, err := c.codec.Decode(ctx, codec.DecodeRequest{ActionID: req.ActionID, Symbols: req.Symbols, Layout: req.Layout}) + if err != nil { + return DecodeResult{}, err + } + return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil +} diff --git a/supernode/cascade/download.go b/supernode/cascade/download.go new file mode 100644 index 00000000..986fb55d --- /dev/null +++ b/supernode/cascade/download.go @@ -0,0 +1,250 @@ +package cascade + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "sort" + "time" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + 
"github.com/LumeraProtocol/supernode/v2/pkg/crypto" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" +) + +const targetRequiredPercent = 17 + +type DownloadRequest struct { + ActionID string + Signature string +} + +type DownloadResponse struct { + EventType SupernodeEventType + Message string + FilePath string + DownloadedDir string +} + +func (task *CascadeRegistrationTask) Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) (err error) { + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "download") + } + fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} + logtrace.Info(ctx, "download: request", fields) + + actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to get action", err, fields) + } + logtrace.Info(ctx, "download: action fetched", fields) + task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) + + if actionDetails.GetAction().State != actiontypes.ActionStateDone { + err = errors.New("action is not in a valid state") + fields[logtrace.FieldError] = "action state is not done yet" + fields[logtrace.FieldActionState] = actionDetails.GetAction().State + return task.wrapErr(ctx, "action not finalized yet", err, fields) + } + logtrace.Info(ctx, "download: action state ok", fields) + + metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) + } + logtrace.Info(ctx, "download: metadata decoded", fields) + task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, 
"Cascade metadata decoded", "", "", send) + + if !metadata.Public { + if req.Signature == "" { + fields[logtrace.FieldError] = "missing signature for private download" + return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) + } + if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to verify download signature", err, fields) + } + logtrace.Info(ctx, "download: signature verified", fields) + } else { + logtrace.Info(ctx, "download: public cascade (no signature)", fields) + } + + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + + logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) + filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) + if err != nil { + fields[logtrace.FieldError] = err.Error() + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + return task.wrapErr(ctx, "failed to download artifacts", err, fields) + } + logtrace.Debug(ctx, "File reconstructed and hash verified", fields) + task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) + + return nil +} + +func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, tmpDir string) error { + if tmpDir == "" { + return nil + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { + if signature == "" { + return errors.New("signature required") + } + // Fetch the action to get the creator 
address for verification + act, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return fmt.Errorf("get action for signature verification: %w", err) + } + creator := act.GetAction().Creator + sigBytes, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("invalid base64 signature: %w", err) + } + if err := task.LumeraClient.Verify(ctx, creator, []byte(actionID), sigBytes); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg, filePath, dir string, send func(resp *DownloadResponse) error) { + _ = send(&DownloadResponse{EventType: eventType, Message: msg, FilePath: filePath, DownloadedDir: dir}) +} + +func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { + var layout codec.Layout + var layoutFetchMS, layoutDecodeMS int64 + var layoutAttempts int + + // Retrieve via index IDs + if len(metadata.RqIdsIds) > 0 { + for _, indexID := range metadata.RqIdsIds { + iStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) + indexFile, err := task.P2PClient.Retrieve(ctx, indexID) + if err != nil || len(indexFile) == 0 { + logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) + continue + } + logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) + indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) + if err != nil { + logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) + continue + } + var netMS, decMS int64 + var attempts int + layout, netMS, decMS, 
attempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) + if err != nil { + logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": attempts}) + continue + } + layoutFetchMS, layoutDecodeMS, layoutAttempts = netMS, decMS, attempts + if len(layout.Blocks) > 0 { + logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": attempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) + break + } + } + } + if len(layout.Blocks) == 0 { + return "", "", errors.New("no symbols found in RQ metadata") + } + fields["layout_fetch_ms"], fields["layout_decode_ms"], fields["layout_attempts"] = layoutFetchMS, layoutDecodeMS, layoutAttempts + return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) +} + +func (task *CascadeRegistrationTask) restoreFileFromLayout(ctx context.Context, layout codec.Layout, dataHash string, actionID string, send func(resp *DownloadResponse) error) (string, string, error) { + fields := logtrace.Fields{logtrace.FieldActionID: actionID} + symSet := make(map[string]struct{}) + for _, block := range layout.Blocks { + for _, s := range block.Symbols { + symSet[s] = struct{}{} + } + } + allSymbols := make([]string, 0, len(symSet)) + for s := range symSet { + allSymbols = append(allSymbols, s) + } + sort.Strings(allSymbols) + totalSymbols := len(allSymbols) + fields["totalSymbols"] = totalSymbols + targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 + if targetRequiredCount < 1 && totalSymbols > 0 { + targetRequiredCount = 1 + } + logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) + retrieveStart := time.Now() + reqCount := targetRequiredCount + if reqCount > totalSymbols { + reqCount = totalSymbols + } + rStart := time.Now() + 
logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) + symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "batch retrieve failed", fields) + return "", "", fmt.Errorf("batch retrieve symbols: %w", err) + } + retrieveMS := time.Since(retrieveStart).Milliseconds() + logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) + decodeStart := time.Now() + dStart := time.Now() + logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) + decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ActionID: actionID, Symbols: symbols, Layout: layout}) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "decode failed", fields) + return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) + } + decodeMS := time.Since(decodeStart).Milliseconds() + logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) + // Emit timing metrics for network retrieval and decode phases + logtrace.Debug(ctx, "download: timing", logtrace.Fields{"action_id": actionID, "retrieve_ms": retrieveMS, "decode_ms": decodeMS}) + + // Verify reconstructed file hash matches action metadata + fileHash, herr := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) + if herr != nil { + fields[logtrace.FieldError] = herr.Error() + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", fmt.Errorf("hash file: %w", herr) + } + if fileHash == nil { + fields[logtrace.FieldError] = "file hash is nil" + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", errors.New("file hash is nil") 
+ } + if verr := cascadekit.VerifyB64DataHash(fileHash, dataHash); verr != nil { + fields[logtrace.FieldError] = verr.Error() + logtrace.Error(ctx, "failed to verify hash", fields) + return "", decodeInfo.DecodeTmpDir, verr + } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "download: file verified", fields) + // Emit minimal JSON payload (metrics system removed) + info := map[string]interface{}{"action_id": actionID, "found_symbols": len(symbols), "target_percent": targetRequiredPercent} + if b, err := json.Marshal(info); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), decodeInfo.FilePath, decodeInfo.DecodeTmpDir, send) + } + return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil +} diff --git a/supernode/cascade/download_helpers.go b/supernode/cascade/download_helpers.go new file mode 100644 index 00000000..73631549 --- /dev/null +++ b/supernode/cascade/download_helpers.go @@ -0,0 +1,47 @@ +package cascade + +import ( + "context" + "time" + + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// retrieveLayoutFromIDs tries the given layout IDs in order and returns the first valid layout. 
+func (task *CascadeRegistrationTask) retrieveLayoutFromIDs(ctx context.Context, layoutIDs []string, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + var layout codec.Layout + var netMS, decMS int64 + attempts := 0 + for _, lid := range layoutIDs { + attempts++ + nStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": lid}) + raw, err := task.P2PClient.Retrieve(ctx, lid) + if err != nil || len(raw) == 0 { + logtrace.Warn(ctx, "Retrieve layout failed or empty", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + netMS = time.Since(nStart).Milliseconds() + dStart := time.Now() + // Layout files are stored as compressed RQ metadata: base64(JSON(layout)).signature.counter + // Use the cascadekit parser to decompress and decode instead of JSON-unmarshalling raw bytes. + parsedLayout, _, _, err := cascadekit.ParseRQMetadataFile(raw) + if err != nil { + logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + layout = parsedLayout + decMS = time.Since(dStart).Milliseconds() + if len(layout.Blocks) > 0 { + return layout, netMS, decMS, attempts, nil + } + } + return codec.Layout{}, netMS, decMS, attempts, nil +} + +// retrieveLayoutFromIndex resolves layout IDs in the index file and tries to fetch a valid layout. 
+func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, index cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + return task.retrieveLayoutFromIDs(ctx, index.LayoutIDs, fields) +} diff --git a/supernode/services/cascade/events.go b/supernode/cascade/events.go similarity index 100% rename from supernode/services/cascade/events.go rename to supernode/cascade/events.go diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go new file mode 100644 index 00000000..2d204c52 --- /dev/null +++ b/supernode/cascade/helper.go @@ -0,0 +1,210 @@ +package cascade + +import ( + "context" + "encoding/base64" + "strconv" + + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + logtrace.Info(ctx, "register: fetch action start", f) + res, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return nil, task.wrapErr(ctx, "failed to get action", err, f) + } + if res.GetAction().ActionID == "" { + return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) + } + logtrace.Info(ctx, "register: fetch action ok", f) + return res.GetAction(), nil +} + +func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) 
error { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldBlockHeight] = blockHeight + logtrace.Info(ctx, "register: top-supernodes fetch start", f) + top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) + if err != nil { + return task.wrapErr(ctx, "failed to get top SNs", err, f) + } + logtrace.Info(ctx, "register: top-supernodes fetch ok", f) + if !supernode.Exists(top.Supernodes, task.SupernodeAccountAddress) { + addresses := make([]string, len(top.Supernodes)) + for i, sn := range top.Supernodes { + addresses[i] = sn.SupernodeAccount + } + logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.SupernodeAccountAddress, "topSupernodes": addresses}) + return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top supernodes: %v", task.SupernodeAccountAddress, addresses), f) + } + logtrace.Info(ctx, "register: top-supernode verified", f) + return nil +} + +func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, filePath string, f logtrace.Fields) (*adaptors.EncodeResult, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + f["file_path"] = filePath + logtrace.Info(ctx, "register: encode input start", f) + res, err := task.RQ.EncodeInput(ctx, actionID, filePath) + if err != nil { + return nil, task.wrapErr(ctx, "failed to encode data", err, f) + } + // Enrich fields with result for subsequent logs + f["symbols_dir"] = res.SymbolsDir + logtrace.Info(ctx, "register: encode input ok", f) + return &res, nil +} + +// ValidateIndexAndLayout verifies: +// - creator signature over the index payload (index_b64) +// - layout signature over base64(JSON(layout)) +// Returns the decoded index and layoutB64. No logging here; callers handle it. 
+func (task *CascadeRegistrationTask) validateIndexAndLayout(ctx context.Context, creator string, indexSignatureFormat string, layout codec.Layout) (cascadekit.IndexFile, []byte, error) { + // Extract and verify creator signature on index + indexB64, creatorSigB64, err := cascadekit.ExtractIndexAndCreatorSig(indexSignatureFormat) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + creatorSig, err := base64.StdEncoding.DecodeString(creatorSigB64) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + if err := task.LumeraClient.Verify(ctx, creator, []byte(indexB64), creatorSig); err != nil { + return cascadekit.IndexFile{}, nil, err + } + // Decode index + indexFile, err := cascadekit.DecodeIndexB64(indexB64) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + // Build layoutB64 and verify single-block + signature + layoutB64, err := cascadekit.LayoutB64(layout) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + if err := cascadekit.VerifySingleBlock(layout); err != nil { + return cascadekit.IndexFile{}, nil, err + } + layoutSig, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSig); err != nil { + return cascadekit.IndexFile{}, nil, err + } + return indexFile, layoutB64, nil +} + +func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, layoutSigB64 string, layoutB64 []byte, f logtrace.Fields) ([]string, [][]byte, error) { + if f == nil { + f = logtrace.Fields{} + } + f["rq_ic"] = uint32(meta.RqIdsIc) + f["rq_max"] = uint32(meta.RqIdsMax) + logtrace.Info(ctx, "register: rqid files generation start", f) + + layoutIDs, layoutFiles, err := cascadekit.GenerateLayoutFilesFromB64(layoutB64, layoutSigB64, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return nil, nil, task.wrapErr(ctx, "failed to 
generate layout files", err, f) + } + logtrace.Info(ctx, "register: layout files generated", logtrace.Fields{"count": len(layoutFiles), "layout_ids": len(layoutIDs)}) + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return nil, nil, task.wrapErr(ctx, "failed to generate index files", err, f) + } + allFiles := append(layoutFiles, indexFiles...) + logtrace.Info(ctx, "register: index files generated", logtrace.Fields{"count": len(indexFiles), "rqids": len(indexIDs)}) + logtrace.Info(ctx, "register: rqid files generation ok", logtrace.Fields{"total_files": len(allFiles)}) + return indexIDs, allFiles, nil +} + +func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + lf := logtrace.Fields{logtrace.FieldActionID: actionID, logtrace.FieldTaskID: task.taskID, "id_files_count": len(idFiles), "symbols_dir": symbolsDir} + for k, v := range f { + lf[k] = v + } + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + logtrace.Info(ctx, "store: first-pass begin", lf) + if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{IDFiles: idFiles, SymbolsDir: symbolsDir, TaskID: task.taskID, ActionID: actionID}, f); err != nil { + return task.wrapErr(ctx, "failed to store artefacts", err, lf) + } + logtrace.Info(ctx, "store: first-pass ok", lf) + return nil +} + +func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { + if err != nil { + f[logtrace.FieldError] = err.Error() + } + logtrace.Error(ctx, msg, f) + if err != nil { + return status.Errorf(codes.Internal, "%s: %v", msg, err) + } + return status.Errorf(codes.Internal, "%s", msg) +} + +func (task *CascadeRegistrationTask) emitArtefactsStored(ctx context.Context, fields logtrace.Fields, _ codec.Layout, send func(resp 
*RegisterResponse) error) { + if fields == nil { + fields = logtrace.Fields{} + } + msg := "Artefacts stored" + logtrace.Info(ctx, "register: artefacts stored", fields) + task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) +} + +func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { + if fields == nil { + fields = logtrace.Fields{} + } + fields["data_bytes"] = dataSize + logtrace.Info(ctx, "register: verify action fee start", fields) + // Round up to the nearest KB to avoid underestimating required fee + dataSizeInKBs := (dataSize + 1023) / 1024 + fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) + if err != nil { + return task.wrapErr(ctx, "failed to get action fee", err, fields) + } + amount, err := strconv.ParseInt(fee.Amount, 10, 64) + if err != nil { + return task.wrapErr(ctx, "failed to parse fee amount", err, fields) + } + requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) + logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{"fee": requiredFee.String(), "dataBytes": dataSize}) + // Accept paying more than the minimum required fee. Only enforce denom match and Amount >= required. 
+ if action.Price == nil { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got ", requiredFee.String()), fields) + } + if action.Price.Denom != requiredFee.Denom { + return task.wrapErr(ctx, "invalid fee denom", errors.Errorf("expected denom %s, got %s", requiredFee.Denom, action.Price.Denom), fields) + } + if action.Price.Amount.LT(requiredFee.Amount) { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), fields) + } + logtrace.Info(ctx, "register: verify action fee ok", logtrace.Fields{"required_fee": requiredFee.String(), "provided_fee": action.Price.String()}) + return nil +} diff --git a/supernode/services/cascade/interfaces.go b/supernode/cascade/interfaces.go similarity index 74% rename from supernode/services/cascade/interfaces.go rename to supernode/cascade/interfaces.go index e782bc23..5a4d0d4e 100644 --- a/supernode/services/cascade/interfaces.go +++ b/supernode/cascade/interfaces.go @@ -6,7 +6,7 @@ import ( // CascadeServiceFactory defines an interface to create cascade tasks // -//go:generate mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go + type CascadeServiceFactory interface { NewCascadeRegistrationTask() CascadeTask } @@ -15,5 +15,5 @@ type CascadeServiceFactory interface { type CascadeTask interface { Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error - CleanupDownload(ctx context.Context, actionID string) error + CleanupDownload(ctx context.Context, tmpDir string) error } diff --git a/supernode/services/cascade/register.go b/supernode/cascade/register.go similarity index 52% rename from supernode/services/cascade/register.go rename to supernode/cascade/register.go index 866420aa..926f9b31 100644 --- 
a/supernode/services/cascade/register.go +++ b/supernode/cascade/register.go @@ -6,7 +6,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" ) // RegisterRequest contains parameters for upload request @@ -25,46 +24,21 @@ type RegisterResponse struct { TxHash string } -// Register processes the upload request for cascade input data. -// 1- Fetch & validate action (it should be a cascade action registered on the chain) -// 2- Ensure this super-node is eligible to process the action (should be in the top supernodes list for the action block height) -// 3- Get the cascade metadata from the action: it contains the data hash and the signatures -// -// Assuming data hash is a base64 encoded string of blake3 hash of the data -// The signatures field is: b64(JSON(Layout)).Signature where Layout is codec.Layout -// The layout is a JSON object that contains the metadata of the data -// -// 4- Verify the data hash (the data hash should match the one in the action ticket) - again, hash function should be blake3 -// 5- Generate Symbols with codec (RQ-Go Library) (the data should be encoded using the codec) -// 6- Extract the layout and the signature from Step 3. 
Verify the signature using the creator's public key (creator address is in the action) -// 7- Generate RQ-ID files from the layout that we generated locally and then match those with the ones in the action -// 8- Verify the IDs in the layout and the metadata (the IDs should match the ones in the action) -// 9- Store the artefacts in P2P Storage (the redundant metadata files and the symbols from the symbols dir) func (task *CascadeRegistrationTask) Register( ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error, ) (err error) { - // Seed correlation ID and origin so logs across layers can be joined and filtered + // Step 1: Correlate context and capture task identity if req != nil && req.ActionID != "" { ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID } + // Step 2: Log request and ensure uploaded file cleanup fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} logtrace.Info(ctx, "register: request", fields) - - // Ensure task status and resources are finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - // Always attempt to remove the uploaded file path defer func() { if req != nil && req.FilePath != "" { if remErr := os.RemoveAll(req.FilePath); remErr != nil { @@ -75,7 +49,7 @@ func (task *CascadeRegistrationTask) Register( } }() - /* 1. Fetch & validate action -------------------------------------------------- */ + // Step 3: Fetch the action details action, err := task.fetchAction(ctx, req.ActionID, fields) if err != nil { return err @@ -87,22 +61,22 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: action fetched", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - /* 2. 
Verify action fee -------------------------------------------------------- */ + // Step 4: Verify action fee based on data size (rounded up to KB) if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } logtrace.Info(ctx, "register: fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - /* 3. Ensure this super-node is eligible -------------------------------------- */ - fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress + // Step 5: Ensure this node is eligible (top supernode for block) + fields[logtrace.FieldSupernodeState] = task.SupernodeAccountAddress if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } logtrace.Info(ctx, "register: top supernode confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - /* 4. Decode cascade metadata -------------------------------------------------- */ + // Step 6: Decode Cascade metadata from the action cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) if err != nil { return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) @@ -110,7 +84,7 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - /* 5. Verify data hash --------------------------------------------------------- */ + // Step 7: Verify request-provided data hash matches metadata if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { return err } @@ -118,65 +92,66 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - /* 6. 
Encode the raw data ------------------------------------------------------ */ - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) + // Step 8: Encode input using the RQ codec to produce layout and symbols + encodeResult, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) if err != nil { return err } - // Promote to Info and include symbols directory for quick visibility - fields["symbols_dir"] = encResp.SymbolsDir + fields["symbols_dir"] = encodeResult.SymbolsDir logtrace.Info(ctx, "register: input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - /* 7. Signature verification + layout decode ---------------------------------- */ - layout, signature, err := task.verifySignatureAndDecodeLayout( - ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields, - ) - if err != nil { - return err + // Step 9: Verify index and layout signatures; produce layoutB64 + logtrace.Info(ctx, "register: verify+decode layout start", fields) + indexFile, layoutB64, vErr := task.validateIndexAndLayout(ctx, action.Creator, cascadeMeta.Signatures, encodeResult.Layout) + if vErr != nil { + return task.wrapErr(ctx, "signature or index validation failed", vErr, fields) } + layoutSignatureB64 := indexFile.LayoutSignature logtrace.Info(ctx, "register: signature verified", fields) task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - /* 8. 
Generate RQ-ID files ----------------------------------------------------- */ - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) + // Step 10: Generate RQID files (layout and index) and compute IDs + rqIDs, idFiles, err := task.generateRQIDFiles(ctx, cascadeMeta, layoutSignatureB64, layoutB64, fields) if err != nil { return err } - // Include count of ID files generated for visibility - fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) + + // Calculate combined size of all index and layout files + totalSize := 0 + for _, file := range idFiles { + totalSize += len(file) + } + + fields["id_files_count"] = len(idFiles) + fields["rqids_count"] = len(rqIDs) + fields["combined_files_size_bytes"] = totalSize + fields["combined_files_size_kb"] = float64(totalSize) / 1024 + fields["combined_files_size_mb"] = float64(totalSize) / (1024 * 1024) logtrace.Info(ctx, "register: rqid files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) - /* 9. Consistency checks ------------------------------------------------------- */ - if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { - return task.wrapErr(ctx, "failed to verify IDs", err, fields) - } logtrace.Info(ctx, "register: rqids validated", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - /* 10. 
Simulate finalize to avoid storing artefacts if it would fail ---------- */ - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { + // Step 11: Simulate finalize to ensure the tx will succeed + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqIDs); err != nil { fields[logtrace.FieldError] = err.Error() logtrace.Info(ctx, "register: finalize simulation failed", fields) - // Emit explicit simulation failure event for client visibility task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) return task.wrapErr(ctx, "finalize action simulation failed", err, fields) } logtrace.Info(ctx, "register: finalize simulation passed", fields) - // Transmit as a standard event so SDK can propagate it (dedicated type) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. P2P interfaces return error only; - // metrics are summarized at the cascade layer and emitted via event. 
- if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { + // Step 12: Store artefacts to the network store + if err := task.storeArtefacts(ctx, action.ActionID, idFiles, encodeResult.SymbolsDir, fields); err != nil { return err } - // Emit artefacts stored event (metrics payload removed; logs preserved) - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + task.emitArtefactsStored(ctx, fields, encodeResult.Layout, send) - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) + // Step 13: Finalize the action on-chain + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqIDs) if err != nil { fields[logtrace.FieldError] = err.Error() logtrace.Info(ctx, "register: finalize action error", fields) @@ -186,6 +161,5 @@ func (task *CascadeRegistrationTask) Register( fields[logtrace.FieldTxHash] = txHash logtrace.Info(ctx, "register: action finalized", fields) task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) - return nil } diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go new file mode 100644 index 00000000..29b047bd --- /dev/null +++ b/supernode/cascade/service.go @@ -0,0 +1,42 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" +) + +type CascadeService struct { + LumeraClient adaptors.LumeraClient + P2P adaptors.P2PService + RQ adaptors.CodecService + P2PClient p2p.Client + SupernodeAccountAddress string +} + +// Compile-time checks to ensure CascadeService implements required interfaces +var _ CascadeServiceFactory = (*CascadeService)(nil) + +// NewCascadeRegistrationTask creates a new task for cascade 
registration +func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { + task := NewCascadeRegistrationTask(service) + return task +} + +// Run starts the service (no background workers) +func (service *CascadeService) Run(ctx context.Context) error { <-ctx.Done(); return nil } + +// NewCascadeService returns a new CascadeService instance +func NewCascadeService(supernodeAccountAddress string, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { + return &CascadeService{ + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + P2PClient: p2pClient, + SupernodeAccountAddress: supernodeAccountAddress, + } +} diff --git a/supernode/cascade/task.go b/supernode/cascade/task.go new file mode 100644 index 00000000..71725d20 --- /dev/null +++ b/supernode/cascade/task.go @@ -0,0 +1,20 @@ +package cascade + +// CascadeRegistrationTask is the task for cascade registration +type CascadeRegistrationTask struct { + *CascadeService + + taskID string +} + +var _ CascadeTask = (*CascadeRegistrationTask)(nil) + +// NewCascadeRegistrationTask returns a new Task instance +func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { + return &CascadeRegistrationTask{CascadeService: service} +} + +// streamEvent sends a RegisterResponse via the provided callback. 
+func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { + _ = send(&RegisterResponse{EventType: eventType, Message: msg, TxHash: txHash}) +} diff --git a/supernode/cmd/init.go b/supernode/cmd/init.go index c4048eb0..a9d01cb5 100644 --- a/supernode/cmd/init.go +++ b/supernode/cmd/init.go @@ -15,7 +15,7 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - consmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" ) @@ -36,15 +36,7 @@ var ( passphraseFile string ) -// Default configuration values -const ( - DefaultKeyringBackend = "test" - DefaultKeyName = "test-key" - DefaultSupernodeAddr = "0.0.0.0" - DefaultSupernodePort = 4444 - DefaultLumeraGRPC = "localhost:9090" - DefaultChainID = "testing" -) +// Default configuration values centralized in config package // InitInputs holds all user inputs for initialization type InitInputs struct { @@ -221,7 +213,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5a: Determine keyring backend (how keys are stored securely) // Options: 'test' (unencrypted), 'file' (encrypted file), 'os' (system keyring) - backend := DefaultKeyringBackend + backend := config.DefaultKeyringBackend if keyringBackendFlag != "" { backend = keyringBackendFlag } @@ -233,7 +225,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5b: Set the name for the cryptographic key // This name is used to reference the key in the keyring - keyName := DefaultKeyName + keyName := config.DefaultKeyName if keyNameFlag != "" { keyName = keyNameFlag @@ -245,7 +237,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5c: Configure the supernode's network binding address // Determines which network interface the supernode will listen on - supernodeAddr := DefaultSupernodeAddr 
+ supernodeAddr := config.DefaultSupernodeHost if supernodeAddrFlag != "" { supernodeAddr = supernodeAddrFlag @@ -256,7 +248,7 @@ func gatherUserInputs() (InitInputs, error) { } // Step 5d: Set the port for supernode peer-to-peer communication - supernodePort := DefaultSupernodePort + supernodePort := int(config.DefaultSupernodePort) if supernodePortFlag != 0 { supernodePort = supernodePortFlag @@ -268,7 +260,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5e: Configure connection to the Lumera blockchain node // This is the GRPC endpoint for blockchain interactions - lumeraGRPC := DefaultLumeraGRPC + lumeraGRPC := config.DefaultLumeraGRPC if lumeraGrpcFlag != "" { lumeraGRPC = lumeraGrpcFlag @@ -280,7 +272,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5f: Set the blockchain network identifier // Must match the chain ID of the Lumera network you're connecting to - chainID := DefaultChainID + chainID := config.DefaultChainID if chainIDFlag != "" { chainID = chainIDFlag } @@ -419,7 +411,7 @@ func setupKeyring(keyName string, shouldRecover bool, mnemonic string) (string, } // recoverExistingKey handles the recovery of an existing key from mnemonic -func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (string, error) { +func recoverExistingKey(kr cKeyring.Keyring, keyName, mnemonic string) (string, error) { // Process and validate mnemonic using helper function processedMnemonic, err := processAndValidateMnemonic(mnemonic) if err != nil { @@ -444,7 +436,7 @@ func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (st } // createNewKey handles the creation of a new key -func createNewKey(kr consmoskeyring.Keyring, keyName string) (string, string, error) { +func createNewKey(kr cKeyring.Keyring, keyName string) (string, string, error) { // Generate mnemonic and create new account keyMnemonic, _, err := keyring.CreateNewAccount(kr, keyName) if err != nil { @@ -497,7 +489,7 @@ func 
promptKeyringBackend(passedBackend string) (string, error) { } backend = passedBackend } else { - backend = DefaultKeyringBackend + backend = config.DefaultKeyringBackend } prompt := &survey.Select{ Message: "Choose keyring backend:", @@ -565,24 +557,24 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC if passedAddrs != "" { supernodeAddr = passedAddrs } else { - supernodeAddr = DefaultSupernodeAddr + supernodeAddr = config.DefaultSupernodeHost } var port string if passedPort != 0 { port = fmt.Sprintf("%d", passedPort) } else { - port = fmt.Sprintf("%d", DefaultSupernodePort) + port = fmt.Sprintf("%d", config.DefaultSupernodePort) } if passedGRPC != "" { lumeraGrpcAddr = passedGRPC } else { - lumeraGrpcAddr = DefaultLumeraGRPC + lumeraGrpcAddr = config.DefaultLumeraGRPC } if passedChainID != "" { chainID = passedChainID } else { - chainID = DefaultChainID + chainID = config.DefaultChainID } // Supernode IP address diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 3d04f7a0..f2d81467 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -3,13 +3,12 @@ package cmd import ( "context" "fmt" - "net/http" - _ "net/http/pprof" "os" "os/signal" "path/filepath" "strings" "syscall" + "time" "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/p2p/kademlia/store/cloud" @@ -17,18 +16,26 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + grpcserver "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/server" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/task" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/LumeraProtocol/supernode/v2/supernode/node/action/server/cascade" - 
"github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/server" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - supernodeService "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/verifier" + statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" + cascadeRPC "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/cascade" + server "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/status" + "github.com/LumeraProtocol/supernode/v2/supernode/verifier" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" + + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + + pbsupernode "github.com/LumeraProtocol/supernode/v2/gen/supernode" + + // Configure DHT advertised/minimum versions from build-time variables + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" ) // startCmd represents the start command @@ -41,8 +48,18 @@ The supernode will connect to the Lumera network and begin participating in the // Initialize logging logtrace.Setup("supernode") + // Advertise our binary version to peers + kademlia.SetLocalVersion(Version) + // Optionally enforce a minimum peer version if provided at build time + if strings.TrimSpace(MinVer) != "" { + kademlia.SetMinVersion(MinVer) + } + // Create context with correlation ID for tracing ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") + // Make the context cancelable for graceful shutdown + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Log configuration info cfgFile := filepath.Join(baseDir, DefaultConfigFile) @@ -98,95 +115,108 @@ The supernode will 
connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) } - // Initialize the supernode - supernodeInstance, err := NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) - if err != nil { - logtrace.Fatal(ctx, "Failed to initialize supernode", logtrace.Fields{"error": err.Error()}) - } + // Supernode wrapper removed; components are managed directly // Configure cascade service cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - }, + appConfig.SupernodeConfig.Identity, lumeraClient, - *p2pService, + p2pService, codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), rqStore, ) - // Create cascade action server - cascadeActionServer := cascade.NewCascadeActionServer(cService) + // Create a task tracker and cascade action server with DI + tr := task.New() + cascadeActionServer := cascadeRPC.NewCascadeActionServer(cService, tr, 0, 0) // Set the version in the status service package - supernodeService.Version = Version + statusService.Version = Version - // Create supernode status service - statusService := supernodeService.NewSupernodeStatusService(*p2pService, lumeraClient, appConfig) - statusService.RegisterTaskProvider(cService) + // Create supernode status service with injected tracker + statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig, tr) // Create supernode server - supernodeServer := server.NewSupernodeServer(statusService) - - // Configure server - serverConfig := &server.Config{ - Identity: appConfig.SupernodeConfig.Identity, - ListenAddresses: appConfig.SupernodeConfig.Host, - Port: int(appConfig.SupernodeConfig.Port), - } - - // Create gRPC server - grpcServer, err := server.New(serverConfig, "service", kr, lumeraClient, cascadeActionServer, 
supernodeServer) + supernodeServer := server.NewSupernodeServer(statusSvc) + + // Create gRPC server (explicit args, no config struct) + grpcServer, err := server.New( + appConfig.SupernodeConfig.Identity, + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.Port), + "service", + kr, + lumeraClient, + grpcserver.ServiceDesc{Desc: &pbcascade.CascadeService_ServiceDesc, Service: cascadeActionServer}, + grpcserver.ServiceDesc{Desc: &pbsupernode.SupernodeService_ServiceDesc, Service: supernodeServer}, + ) if err != nil { logtrace.Fatal(ctx, "Failed to create gRPC server", logtrace.Fields{"error": err.Error()}) } // Create HTTP gateway server that directly calls the supernode server - gatewayServer, err := gateway.NewServer(appConfig.SupernodeConfig.Host, int(appConfig.SupernodeConfig.GatewayPort), supernodeServer) + // Pass chain ID for pprof configuration + gatewayServer, err := gateway.NewServerWithConfig( + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.GatewayPort), + supernodeServer, + appConfig.LumeraClientConfig.ChainID, + ) if err != nil { return fmt.Errorf("failed to create gateway server: %w", err) } - // Start profiling server on testnet only - isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") + // Start the services using the standard runner and capture exit + servicesErr := make(chan error, 1) + go func() { servicesErr <- RunServices(ctx, grpcServer, cService, p2pService, gatewayServer) }() - if isTestnet && os.Getenv("INTEGRATION_TEST") != "true" { - profilingAddr := "0.0.0.0:8082" + // Set up signal handling for graceful shutdown + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigCh) + + // Wait for either a termination signal or service exit + var triggeredBySignal bool + var runErr error + select { + case sig := <-sigCh: + triggeredBySignal = true + logtrace.Debug(ctx, "Received signal, shutting down", 
logtrace.Fields{"signal": sig.String()}) + case runErr = <-servicesErr: + if runErr != nil { + logtrace.Error(ctx, "Service error", logtrace.Fields{"error": runErr.Error()}) + } else { + logtrace.Debug(ctx, "Services exited", logtrace.Fields{}) + } + } - logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{ - "address": profilingAddr, - "chain_id": appConfig.LumeraClientConfig.ChainID, - "is_testnet": isTestnet, - }) + // Cancel context to signal all services + cancel() - go func() { - if err := http.ListenAndServe(profilingAddr, nil); err != nil { - logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()}) - } - }() - } + // Stop HTTP gateway and gRPC servers without blocking shutdown + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() - // Start the services go func() { - if err := RunServices(ctx, grpcServer, cService, *p2pService, gatewayServer); err != nil { - logtrace.Error(ctx, "Service error", logtrace.Fields{"error": err.Error()}) + if err := gatewayServer.Stop(shutdownCtx); err != nil { + logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) } }() + grpcServer.Close() - // Set up signal handling for graceful shutdown - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - - // Wait for termination signal - sig := <-sigCh - logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + // Close Lumera client without blocking shutdown + logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) + go func() { + if err := lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) + } + }() - // Graceful shutdown - if err := supernodeInstance.Stop(ctx); err != nil { - logtrace.Error(ctx, "Error during shutdown", logtrace.Fields{"error": err.Error()}) + // If we triggered shutdown by 
signal, wait for services to drain + if triggeredBySignal { + if err := <-servicesErr; err != nil { + logtrace.Error(ctx, "Service error on shutdown", logtrace.Fields{"error": err.Error()}) + } } return nil @@ -198,7 +228,7 @@ func init() { } // initP2PService initializes the P2P service -func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (p2p.P2P, error) { // Get the supernode address from the keyring keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) if err != nil { @@ -219,5 +249,44 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } - return &p2pService, nil + return p2pService, nil +} + +// initLumeraClient initializes the Lumera client based on configuration +func initLumeraClient(ctx context.Context, config *config.Config, kr cKeyring.Keyring) (lumera.Client, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) + if err != nil { + return nil, fmt.Errorf("failed to create Lumera config: %w", err) + } + return lumera.NewClient( + ctx, + lumeraConfig, + ) +} + +// initRQStore initializes the RaptorQ store for Cascade processing +func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Create RaptorQ store directory if it doesn't exist + rqDir := config.GetRaptorQFilesDir() + "/rq" + if err := os.MkdirAll(rqDir, 0700); err != nil { + return nil, 
fmt.Errorf("failed to create RQ store directory: %w", err) + } + + // Create the SQLite file path + rqStoreFile := rqDir + "/rqstore.db" + + logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ + "file_path": rqStoreFile, + }) + + // Initialize RaptorQ store with SQLite + return rqstore.NewSQLiteRQStore(rqStoreFile) } diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go deleted file mode 100644 index c0740fd0..00000000 --- a/supernode/cmd/supernode.go +++ /dev/null @@ -1,140 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" -) - -// Supernode represents a supernode in the Lumera network -type Supernode struct { - config *config.Config - lumeraClient lumera.Client - p2pService p2p.P2P - keyring keyring.Keyring - rqStore rqstore.Store - keyName string // String that represents the supernode account in keyring -} - -// NewSupernode creates a new supernode instance -func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring, - p2pClient *p2p.P2P, rqStore rqstore.Store, lumeraClient lumera.Client) (*Supernode, error) { - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - supernode := &Supernode{ - config: config, - lumeraClient: lumeraClient, - keyring: kr, - rqStore: rqStore, - p2pService: *p2pClient, - keyName: config.SupernodeConfig.KeyName, - } - - return supernode, nil -} - -// Start starts all supernode services -func (s *Supernode) Start(ctx context.Context) error { - // Verify that the key specified in config exists - keyInfo, err := s.keyring.Key(s.config.SupernodeConfig.KeyName) - if err != nil { - logtrace.Error(ctx, "Key not found in keyring", 
logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "error": err.Error(), - }) - - // Provide helpful guidance - fmt.Printf("\nError: Key '%s' not found in keyring at %s\n", - s.config.SupernodeConfig.KeyName, s.config.GetKeyringDir()) - fmt.Println("\nPlease create the key first with one of these commands:") - fmt.Printf(" supernode keys add %s\n", s.config.SupernodeConfig.KeyName) - fmt.Printf(" supernode keys recover %s\n", s.config.SupernodeConfig.KeyName) - return fmt.Errorf("key not found") - } - - // Get the account address for logging - address, err := keyInfo.GetAddress() - if err != nil { - logtrace.Error(ctx, "Failed to get address from key", logtrace.Fields{ - "error": err.Error(), - }) - return err - } - - logtrace.Debug(ctx, "Found valid key in keyring", logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "address": address.String(), - }) - - // Use the P2P service that was passed in via constructor - logtrace.Debug(ctx, "Starting P2P service", logtrace.Fields{}) - if err := s.p2pService.Run(ctx); err != nil { - return fmt.Errorf("p2p service error: %w", err) - } - - return nil -} - -// Stop stops all supernode services -func (s *Supernode) Stop(ctx context.Context) error { - // Close the Lumera client connection - if s.lumeraClient != nil { - logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) - if err := s.lumeraClient.Close(); err != nil { - logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{ - "error": err.Error(), - }) - } - } - - return nil -} - -// initLumeraClient initializes the Lumera client based on configuration -func initLumeraClient(ctx context.Context, config *config.Config, kr keyring.Keyring) (lumera.Client, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) - if err != nil { - return nil, 
fmt.Errorf("failed to create Lumera config: %w", err) - } - return lumera.NewClient( - ctx, - lumeraConfig, - ) -} - -// initRQStore initializes the RaptorQ store for Cascade processing -func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Create RaptorQ store directory if it doesn't exist - rqDir := config.GetRaptorQFilesDir() + "/rq" - if err := os.MkdirAll(rqDir, 0700); err != nil { - return nil, fmt.Errorf("failed to create RQ store directory: %w", err) - } - - // Create the SQLite file path - rqStoreFile := rqDir + "/rqstore.db" - - logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ - "file_path": rqStoreFile, - }) - - // Initialize RaptorQ store with SQLite - return rqstore.NewSQLiteRQStore(rqStoreFile) -} diff --git a/supernode/cmd/version.go b/supernode/cmd/version.go index e6d085d8..9daaabc8 100644 --- a/supernode/cmd/version.go +++ b/supernode/cmd/version.go @@ -11,6 +11,8 @@ var ( Version = "dev" GitCommit = "unknown" BuildTime = "unknown" + // Optional: minimum peer version for DHT gating (empty disables gating) + MinVer = "" ) // versionCmd represents the version command diff --git a/supernode/config.yml b/supernode/config.yml index 3bbf8b7e..35d888a3 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -2,7 +2,9 @@ supernode: key_name: "mykey" # Account name for the supernode in keyring identity: "lumera1ccmw5plzuldntum2rz6kq6uq346vtrhrvwfzsa" # Identity of the supernode, lumera address + # You can set either 'host' (preferred) or 'ip_address' (legacy alias). 
host: "0.0.0.0" + # ip_address: "0.0.0.0" port: 4444 # Keyring Configuration diff --git a/supernode/config/config.go b/supernode/config/config.go index e3910ac2..d655391c 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -5,15 +5,18 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "gopkg.in/yaml.v3" ) type SupernodeConfig struct { - KeyName string `yaml:"key_name"` - Identity string `yaml:"identity"` - Host string `yaml:"host"` + KeyName string `yaml:"key_name"` + Identity string `yaml:"identity"` + Host string `yaml:"host"` + // IPAddress is an accepted alias for Host to support older configs + IPAddress string `yaml:"ip_address,omitempty"` Port uint16 `yaml:"port"` GatewayPort uint16 `yaml:"gateway_port,omitempty"` } @@ -127,6 +130,15 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { return nil, fmt.Errorf("error parsing config file: %w", err) } + // Support both 'host' and legacy 'ip_address' fields. If 'host' is empty + // and 'ip_address' is provided, use it as the host value. 
+ if strings.TrimSpace(config.SupernodeConfig.Host) == "" && strings.TrimSpace(config.SupernodeConfig.IPAddress) != "" { + config.SupernodeConfig.Host = strings.TrimSpace(config.SupernodeConfig.IPAddress) + logtrace.Debug(ctx, "Using ip_address as host", logtrace.Fields{ + "ip_address": config.SupernodeConfig.IPAddress, + }) + } + // Set the base directory config.BaseDir = baseDir diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go new file mode 100644 index 00000000..d7915259 --- /dev/null +++ b/supernode/config/defaults.go @@ -0,0 +1,15 @@ +package config + +// Centralized default values for configuration + +const ( + DefaultKeyringBackend = "test" + DefaultKeyringDir = "keys" + DefaultKeyName = "test-key" + DefaultSupernodeHost = "0.0.0.0" + DefaultSupernodePort = 4444 + DefaultP2PPort = 4445 + DefaultLumeraGRPC = "localhost:9090" + DefaultChainID = "testing" + DefaultRaptorQFilesDir = "raptorq_files" +) diff --git a/supernode/config/save.go b/supernode/config/save.go index 5199fb81..d93e6cb8 100644 --- a/supernode/config/save.go +++ b/supernode/config/save.go @@ -32,21 +32,21 @@ func SaveConfig(config *Config, filename string) error { // CreateDefaultConfig creates a default configuration with the specified values func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyringDir string, passPlain, passEnv, passFile string) *Config { - // Set default values if keyringBackend == "" { - keyringBackend = "test" + keyringBackend = DefaultKeyringBackend } if keyringDir == "" { - keyringDir = "keys" + keyringDir = DefaultKeyringDir + } + if keyName == "" { + keyName = DefaultKeyName + } + if chainID == "" { + chainID = DefaultChainID } return &Config{ - SupernodeConfig: SupernodeConfig{ - KeyName: keyName, - Identity: identity, - Host: "0.0.0.0", - Port: 4444, - }, + SupernodeConfig: SupernodeConfig{KeyName: keyName, Identity: identity, Host: DefaultSupernodeHost, Port: DefaultSupernodePort}, KeyringConfig: KeyringConfig{ 
Backend: keyringBackend, Dir: keyringDir, @@ -54,16 +54,8 @@ func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyr PassEnv: passEnv, PassFile: passFile, }, - P2PConfig: P2PConfig{ - Port: 4445, - DataDir: "data/p2p", - }, - LumeraClientConfig: LumeraClientConfig{ - GRPCAddr: "localhost:9090", - ChainID: chainID, - }, - RaptorQConfig: RaptorQConfig{ - FilesDir: "raptorq_files", - }, + P2PConfig: P2PConfig{Port: DefaultP2PPort, DataDir: "data/p2p"}, + LumeraClientConfig: LumeraClientConfig{GRPCAddr: DefaultLumeraGRPC, ChainID: chainID}, + RaptorQConfig: RaptorQConfig{FilesDir: DefaultRaptorQFilesDir}, } } diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go deleted file mode 100644 index 6a38b750..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ /dev/null @@ -1,367 +0,0 @@ -package cascade - -import ( - "encoding/hex" - "fmt" - "io" - "os" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - - "google.golang.org/grpc" -) - -type ActionServer struct { - pb.UnimplementedCascadeServiceServer - factory cascadeService.CascadeServiceFactory -} - -// NewCascadeActionServer creates a new CascadeActionServer with injected service -func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { - return &ActionServer{factory: factory} -} - -// calculateOptimalChunkSize returns an optimal chunk size based on file size -// to balance throughput and memory usage -func calculateOptimalChunkSize(fileSize int64) int { - const ( - minChunkSize = 64 * 1024 // 64 KB minimum - maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files - smallFileThreshold = 1024 * 1024 // 1 MB - mediumFileThreshold 
= 50 * 1024 * 1024 // 50 MB - largeFileThreshold = 500 * 1024 * 1024 // 500 MB - ) - - var chunkSize int - - switch { - case fileSize <= smallFileThreshold: - // For small files (up to 1MB), use 64KB chunks - chunkSize = minChunkSize - case fileSize <= mediumFileThreshold: - // For medium files (1MB-50MB), use 256KB chunks - chunkSize = 256 * 1024 - case fileSize <= largeFileThreshold: - // For large files (50MB-500MB), use 1MB chunks - chunkSize = 1024 * 1024 - default: - // For very large files (500MB+), use 4MB chunks for optimal throughput - chunkSize = maxChunkSize - } - - // Ensure chunk size is within bounds - if chunkSize < minChunkSize { - chunkSize = minChunkSize - } - if chunkSize > maxChunkSize { - chunkSize = maxChunkSize - } - - return chunkSize -} - -func (server *ActionServer) Desc() *grpc.ServiceDesc { - return &pb.CascadeService_ServiceDesc -} - -func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Register", - logtrace.FieldModule: "CascadeActionServer", - } - - ctx := stream.Context() - logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) - - const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit - - var ( - metadata *pb.Metadata - totalSize int - ) - - hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) - return fmt.Errorf("initializing hasher and temp file: %w", err) - } - defer func(tempFile *os.File) { - err := tempFile.Close() - if err != nil && !errors.Is(err, os.ErrClosed) { - fields[logtrace.FieldError] = err.Error() - logtrace.Warn(ctx, "error closing temp file", fields) - } - }(tempFile) - - // Process incoming stream - for { - req, err := stream.Recv() - if err == io.EOF { - // End of stream - break - } - if err != nil { - fields[logtrace.FieldError] = 
err.Error() - logtrace.Error(ctx, "error receiving stream data", fields) - return fmt.Errorf("failed to receive stream data: %w", err) - } - - // Check which type of message we received - switch x := req.RequestType.(type) { - case *pb.RegisterRequest_Chunk: - if x.Chunk != nil { - - // hash the chunks - _, err := hasher.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to hasher", fields) - return fmt.Errorf("hashing error: %w", err) - } - - // write chunks to the file - _, err = tempFile.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to file", fields) - return fmt.Errorf("file write error: %w", err) - } - totalSize += len(x.Chunk.Data) - - // Validate total size doesn't exceed limit - if totalSize > maxFileSize { - fields[logtrace.FieldError] = "file size exceeds 1GB limit" - fields["total_size"] = totalSize - logtrace.Error(ctx, "upload rejected: file too large", fields) - return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) - } - - logtrace.Debug(ctx, "received data chunk", logtrace.Fields{ - "chunk_size": len(x.Chunk.Data), - "total_size_so_far": totalSize, - }) - } - case *pb.RegisterRequest_Metadata: - // Store metadata - this should be the final message - metadata = x.Metadata - logtrace.Debug(ctx, "received metadata", logtrace.Fields{ - "task_id": metadata.TaskId, - "action_id": metadata.ActionId, - }) - } - } - - // Verify we received metadata - if metadata == nil { - logtrace.Error(ctx, "no metadata received in stream", fields) - return fmt.Errorf("no metadata received") - } - fields[logtrace.FieldTaskID] = metadata.GetTaskId() - fields[logtrace.FieldActionID] = metadata.GetActionId() - logtrace.Debug(ctx, "metadata received from action-sdk", fields) - - // Ensure all data is written to disk before calculating hash - if err := tempFile.Sync(); err != nil { - 
fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to sync temp file", fields) - return fmt.Errorf("failed to sync temp file: %w", err) - } - - hash := hasher.Sum(nil) - hashHex := hex.EncodeToString(hash) - fields[logtrace.FieldHashHex] = hashHex - logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) - - targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) - return fmt.Errorf("failed to replace temp dir with task dir: %w", err) - } - - // Process the complete data - task := server.factory.NewCascadeRegistrationTask() - err = task.Register(ctx, &cascadeService.RegisterRequest{ - TaskID: metadata.TaskId, - ActionID: metadata.ActionId, - DataHash: hash, - DataSize: totalSize, - FilePath: targetPath, - }, func(resp *cascadeService.RegisterResponse) error { - grpcResp := &pb.RegisterResponse{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - TxHash: resp.TxHash, - } - if err := stream.Send(grpcResp); err != nil { - logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - return nil - }) - - if err != nil { - logtrace.Error(ctx, "registration task failed", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return fmt.Errorf("registration failed: %w", err) - } - - logtrace.Debug(ctx, "cascade registration completed successfully", fields) - return nil -} - -func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Download", - logtrace.FieldModule: "CascadeActionServer", - logtrace.FieldActionID: req.GetActionId(), - } - - ctx := stream.Context() - logtrace.Debug(ctx, "download request received from client", fields) - - task := 
server.factory.NewCascadeRegistrationTask() - - // Authorization is enforced inside the task based on metadata.Public. - // If public, signature is skipped; if private, signature is required. - - var restoredFilePath string - var tmpDir string - - // Ensure tmpDir is cleaned up even if errors occur after retrieval - defer func() { - if tmpDir != "" { - if err := task.CleanupDownload(ctx, tmpDir); err != nil { - logtrace.Error(ctx, "error cleaning up the tmp dir", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - logtrace.Debug(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir}) - } - } - }() - - err := task.Download(ctx, &cascadeService.DownloadRequest{ - ActionID: req.GetActionId(), - Signature: req.GetSignature(), - }, func(resp *cascadeService.DownloadResponse) error { - grpcResp := &pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - }, - }, - } - - if resp.FilePath != "" { - restoredFilePath = resp.FilePath - tmpDir = resp.DownloadedDir - } - - return stream.Send(grpcResp) - }) - - if err != nil { - logtrace.Error(ctx, "error occurred during download process", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - - if restoredFilePath == "" { - logtrace.Error(ctx, "no artefact file retrieved", fields) - return fmt.Errorf("no artefact to stream") - } - logtrace.Debug(ctx, "streaming artefact file in chunks", fields) - - // Open the restored file and stream directly from disk to avoid buffering entire file in memory - f, err := os.Open(restoredFilePath) - if err != nil { - logtrace.Error(ctx, "failed to open restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - logtrace.Error(ctx, "failed to stat restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Calculate 
optimal chunk size based on file size - chunkSize := calculateOptimalChunkSize(fi.Size()) - logtrace.Debug(ctx, "calculated optimal chunk size for download", logtrace.Fields{ - "file_size": fi.Size(), - "chunk_size": chunkSize, - }) - - // Pre-read first chunk to avoid any delay between SERVE_READY and first data - buf := make([]byte, chunkSize) - n, readErr := f.Read(buf) - if readErr != nil && readErr != io.EOF { - return fmt.Errorf("chunked read failed: %w", readErr) - } - - // Announce: file is ready to be served to the client (right before first data) - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType_SERVE_READY, - Message: "File available for download", - }, - }, - }); err != nil { - logtrace.Error(ctx, "failed to send serve-ready event", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Send pre-read first chunk if available - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: &pb.DataChunk{Data: buf[:n]}, - }, - }); err != nil { - logtrace.Error(ctx, "failed to stream first chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - } - - // If EOF after first read, we're done - if readErr == io.EOF { - logtrace.Debug(ctx, "completed streaming all chunks", fields) - return nil - } - - // Continue streaming remaining chunks - for { - n, readErr = f.Read(buf) - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: &pb.DataChunk{Data: buf[:n]}, - }, - }); err != nil { - logtrace.Error(ctx, "failed to stream chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - } - if readErr == io.EOF { - break - } - if readErr != nil { - return fmt.Errorf("chunked read failed: %w", readErr) - } - } - - // Cleanup is handled in deferred block above - - logtrace.Debug(ctx, "completed streaming all 
chunks", fields) - return nil -} diff --git a/supernode/node/action/server/cascade/cascade_action_server_mock.go b/supernode/node/action/server/cascade/cascade_action_server_mock.go deleted file mode 100644 index 3113dcb3..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server_mock.go +++ /dev/null @@ -1,41 +0,0 @@ -package cascade - -import ( - "context" - "io" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "google.golang.org/grpc/metadata" -) - -// mockStream simulates pb.CascadeService_RegisterServer -type mockStream struct { - ctx context.Context - request []*pb.RegisterRequest - sent []*pb.RegisterResponse - pos int -} - -func (m *mockStream) Context() context.Context { - return m.ctx -} - -func (m *mockStream) Send(resp *pb.RegisterResponse) error { - m.sent = append(m.sent, resp) - return nil -} - -func (m *mockStream) Recv() (*pb.RegisterRequest, error) { - if m.pos >= len(m.request) { - return nil, io.EOF - } - req := m.request[m.pos] - m.pos++ - return req, nil -} - -func (m *mockStream) SetHeader(md metadata.MD) error { return nil } -func (m *mockStream) SendHeader(md metadata.MD) error { return nil } -func (m *mockStream) SetTrailer(md metadata.MD) {} -func (m *mockStream) SendMsg(_ any) error { return nil } -func (m *mockStream) RecvMsg(_ any) error { return nil } diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go deleted file mode 100644 index c71c0173..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package cascade - -import ( - "context" - "errors" - "testing" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks" - - "github.com/golang/mock/gomock" - 
"github.com/stretchr/testify/assert" -) - -func TestRegister_Success(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - - // Expect Register to be called with any input, respond via callback - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - return send(&cascade.RegisterResponse{ - EventType: 1, - Message: "registration successful", - TxHash: "tx123", - }) - }, - ).Times(1) - - mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.NoError(t, err) - assert.Len(t, stream.sent, 1) - assert.Equal(t, "registration successful", stream.sent[0].Message) - assert.Equal(t, "tx123", stream.sent[0].TxHash) -} - -func TestRegister_Error_NoMetadata(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "no metadata received") -} - -func TestRegister_Error_TaskFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := 
cascademocks.NewMockCascadeServiceFactory(ctrl) - - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("task failed")).Times(1) - mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "registration failed: task failed") -} diff --git a/supernode/node/action/server/cascade/helper.go b/supernode/node/action/server/cascade/helper.go deleted file mode 100644 index 386e9ce6..00000000 --- a/supernode/node/action/server/cascade/helper.go +++ /dev/null @@ -1,39 +0,0 @@ -package cascade - -import ( - "fmt" - "lukechampine.com/blake3" - "os" - "path/filepath" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -func initializeHasherAndTempFile() (*blake3.Hasher, *os.File, string, error) { - hasher := blake3.New(32, nil) - - // Create a unique temp file to avoid collisions across concurrent calls - tempFile, err := os.CreateTemp("", "cascade-upload-*") - if err != nil { - return nil, nil, "", fmt.Errorf("could not create temp file: %w", err) - } - - return hasher, tempFile, tempFile.Name(), nil -} - -func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (targetPath string, err error) { - if err := tempFile.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - return "", fmt.Errorf("failed to close temp file: %w", err) - } - - targetDir := filepath.Join(os.TempDir(), taskID) - if err := os.MkdirAll(targetDir, 0755); err != nil { - return "", fmt.Errorf("could not create task directory: %w", err) - } - targetPath = filepath.Join(targetDir, fmt.Sprintf("uploaded-%s.dat", taskID)) - 
if err := os.Rename(tempFilePath, targetPath); err != nil { - return "", fmt.Errorf("could not move file to final location: %w", err) - } - - return targetPath, nil -} diff --git a/supernode/node/supernode/gateway/server.go b/supernode/node/supernode/gateway/server.go deleted file mode 100644 index 7e17e238..00000000 --- a/supernode/node/supernode/gateway/server.go +++ /dev/null @@ -1,126 +0,0 @@ -package gateway - -import ( - "context" - "fmt" - "net" - "net/http" - "strconv" - "time" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// DefaultGatewayPort is an uncommon port for internal gateway use -const DefaultGatewayPort = 8002 - -// Server represents the HTTP gateway server -type Server struct { - ipAddress string - port int - server *http.Server - supernodeServer pb.SupernodeServiceServer -} - -// NewServer creates a new HTTP gateway server that directly calls the service -// If port is 0, it will use the default port -func NewServer(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer) (*Server, error) { - if supernodeServer == nil { - return nil, fmt.Errorf("supernode server is required") - } - - // Use default port if not specified - if port == 0 { - port = DefaultGatewayPort - } - - return &Server{ - ipAddress: ipAddress, - port: port, - supernodeServer: supernodeServer, - }, nil -} - -// Run starts the HTTP gateway server (implements service interface) -func (s *Server) Run(ctx context.Context) error { - // Create gRPC-Gateway mux with custom JSON marshaler options - mux := runtime.NewServeMux( - runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ - EmitDefaults: true, // This ensures zero values are included - OrigName: true, // Use original proto field names - }), - ) - - // Register the service handler directly - err := pb.RegisterSupernodeServiceHandlerServer(ctx, mux, s.supernodeServer) - if 
err != nil { - return fmt.Errorf("failed to register gateway handler: %w", err) - } - - // Create HTTP mux for custom endpoints - httpMux := http.NewServeMux() - - // Register gRPC-Gateway endpoints - httpMux.Handle("/api/", mux) - - // Register Swagger endpoints - httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) - httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) - httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/" { - http.Redirect(w, r, "/swagger-ui/", http.StatusFound) - } else { - http.NotFound(w, r) - } - }) - - // Create HTTP server - s.server = &http.Server{ - Addr: net.JoinHostPort(s.ipAddress, strconv.Itoa(s.port)), - Handler: s.corsMiddleware(httpMux), - ReadTimeout: 15 * time.Second, - WriteTimeout: 15 * time.Second, - IdleTimeout: 60 * time.Second, - } - - logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ - "address": s.ipAddress, - "port": s.port, - }) - - // Start server - if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - return fmt.Errorf("gateway server failed: %w", err) - } - - return nil -} - -// Stop gracefully stops the HTTP gateway server (implements service interface) -func (s *Server) Stop(ctx context.Context) error { - if s.server == nil { - return nil - } - - logtrace.Debug(ctx, "Shutting down HTTP gateway server", nil) - return s.server.Shutdown(ctx) -} - -// corsMiddleware adds CORS headers for web access -func (s *Server) corsMiddleware(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization") - - if r.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - h.ServeHTTP(w, r) - }) -} diff --git 
a/supernode/node/supernode/gateway/swagger.json b/supernode/node/supernode/gateway/swagger.json deleted file mode 100644 index af023816..00000000 --- a/supernode/node/supernode/gateway/swagger.json +++ /dev/null @@ -1,296 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "supernode/supernode.proto", - "version": "version not set" - }, - "tags": [ - { - "name": "SupernodeService" - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/api/v1/services": { - "get": { - "operationId": "SupernodeService_ListServices", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeListServicesResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "tags": [ - "SupernodeService" - ] - } - }, - "/api/v1/status": { - "get": { - "operationId": "SupernodeService_GetStatus", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "tags": [ - "SupernodeService" - ] - } - } - }, - "definitions": { - "ResourcesCPU": { - "type": "object", - "properties": { - "usagePercent": { - "type": "number", - "format": "double", - "title": "CPU usage percentage (0-100)" - }, - "cores": { - "type": "integer", - "format": "int32", - "title": "Number of CPU cores" - } - } - }, - "ResourcesMemory": { - "type": "object", - "properties": { - "totalGb": { - "type": "number", - "format": "double", - "title": "Total memory in GB" - }, - "usedGb": { - "type": "number", - "format": "double", - "title": "Used memory in GB" - }, - "availableGb": { - "type": "number", - "format": "double", - "title": "Available memory in GB" - }, - "usagePercent": { - "type": "number", - "format": 
"double", - "title": "Memory usage percentage (0-100)" - } - } - }, - "ResourcesStorage": { - "type": "object", - "properties": { - "path": { - "type": "string", - "title": "Storage path being monitored" - }, - "totalBytes": { - "type": "string", - "format": "uint64" - }, - "usedBytes": { - "type": "string", - "format": "uint64" - }, - "availableBytes": { - "type": "string", - "format": "uint64" - }, - "usagePercent": { - "type": "number", - "format": "double", - "title": "Storage usage percentage (0-100)" - } - } - }, - "StatusResponseNetwork": { - "type": "object", - "properties": { - "peersCount": { - "type": "integer", - "format": "int32", - "title": "Number of connected peers in P2P network" - }, - "peerAddresses": { - "type": "array", - "items": { - "type": "string" - }, - "title": "List of connected peer addresses (optional, may be empty for privacy)" - } - }, - "title": "Network information" - }, - "StatusResponseResources": { - "type": "object", - "properties": { - "cpu": { - "$ref": "#/definitions/ResourcesCPU" - }, - "memory": { - "$ref": "#/definitions/ResourcesMemory" - }, - "storageVolumes": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/ResourcesStorage" - } - }, - "hardwareSummary": { - "type": "string", - "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" - } - }, - "title": "System resource information" - }, - "StatusResponseServiceTasks": { - "type": "object", - "properties": { - "serviceName": { - "type": "string" - }, - "taskIds": { - "type": "array", - "items": { - "type": "string" - } - }, - "taskCount": { - "type": "integer", - "format": "int32" - } - }, - "title": "ServiceTasks contains task information for a specific service" - }, - "protobufAny": { - "type": "object", - "properties": { - "@type": { - "type": "string" - } - }, - "additionalProperties": {} - }, - "rpcStatus": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - 
"type": "string" - }, - "details": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "supernodeListServicesResponse": { - "type": "object", - "properties": { - "services": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/supernodeServiceInfo" - } - }, - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "supernodeServiceInfo": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "methods": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "supernodeStatusResponse": { - "type": "object", - "properties": { - "version": { - "type": "string", - "title": "Supernode version" - }, - "uptimeSeconds": { - "type": "string", - "format": "uint64", - "title": "Uptime in seconds" - }, - "resources": { - "$ref": "#/definitions/StatusResponseResources" - }, - "runningTasks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/StatusResponseServiceTasks" - }, - "title": "Services with currently running tasks" - }, - "registeredServices": { - "type": "array", - "items": { - "type": "string" - }, - "title": "All registered/available services" - }, - "network": { - "$ref": "#/definitions/StatusResponseNetwork", - "title": "P2P network information" - }, - "rank": { - "type": "integer", - "format": "int32", - "title": "Rank in the top supernodes list (0 if not in top list)" - }, - "ipAddress": { - "type": "string", - "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" - } - }, - "title": "The StatusResponse represents system status with clear organization" - } - } -} diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go deleted file mode 100644 index 4e9d0f23..00000000 --- a/supernode/node/supernode/server/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package server - -const ( - defaultPort = 4444 -) - -// Config contains settings of the supernode 
server. -type Config struct { - Identity string - ListenAddresses string - Port int -} - -// NewConfig returns a new Config instance. -func NewConfig() *Config { - return &Config{ - Port: defaultPort, - } -} diff --git a/supernode/node/supernode/server/config_test.go b/supernode/node/supernode/server/config_test.go deleted file mode 100644 index 33e06f68..00000000 --- a/supernode/node/supernode/server/config_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewConfig_Defaults(t *testing.T) { - cfg := NewConfig() - - assert.NotNil(t, cfg) - assert.Equal(t, "", cfg.ListenAddresses, "default listen address should be empty") - assert.Equal(t, 4444, cfg.Port, "default port should be 4444") - assert.Equal(t, "", cfg.Identity, "default identity should be empty") -} diff --git a/supernode/node/supernode/server/mock_keyring.go b/supernode/node/supernode/server/mock_keyring.go deleted file mode 100644 index 85cb9910..00000000 --- a/supernode/node/supernode/server/mock_keyring.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/cosmos/cosmos-sdk/crypto/keyring (interfaces: Keyring) - -// Package mock_keyring is a generated GoMock package. -package server - -import ( - reflect "reflect" - - keyring "github.com/cosmos/cosmos-sdk/crypto/keyring" - types "github.com/cosmos/cosmos-sdk/crypto/types" - types0 "github.com/cosmos/cosmos-sdk/types" - signing "github.com/cosmos/cosmos-sdk/types/tx/signing" - gomock "go.uber.org/mock/gomock" -) - -// MockKeyring is a mock of Keyring interface. -type MockKeyring struct { - ctrl *gomock.Controller - recorder *MockKeyringMockRecorder -} - -// MockKeyringMockRecorder is the mock recorder for MockKeyring. -type MockKeyringMockRecorder struct { - mock *MockKeyring -} - -// NewMockKeyring creates a new mock instance. 
-func NewMockKeyring(ctrl *gomock.Controller) *MockKeyring { - mock := &MockKeyring{ctrl: ctrl} - mock.recorder = &MockKeyringMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockKeyring) EXPECT() *MockKeyringMockRecorder { - return m.recorder -} - -// Backend mocks base method. -func (m *MockKeyring) Backend() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Backend") - ret0, _ := ret[0].(string) - return ret0 -} - -// Backend indicates an expected call of Backend. -func (mr *MockKeyringMockRecorder) Backend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Backend", reflect.TypeOf((*MockKeyring)(nil).Backend)) -} - -// Delete mocks base method. -func (m *MockKeyring) Delete(arg0 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockKeyringMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKeyring)(nil).Delete), arg0) -} - -// DeleteByAddress mocks base method. -func (m *MockKeyring) DeleteByAddress(arg0 types0.Address) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteByAddress", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteByAddress indicates an expected call of DeleteByAddress. -func (mr *MockKeyringMockRecorder) DeleteByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteByAddress", reflect.TypeOf((*MockKeyring)(nil).DeleteByAddress), arg0) -} - -// ExportPrivKeyArmor mocks base method. 
-func (m *MockKeyring) ExportPrivKeyArmor(arg0, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmor", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmor indicates an expected call of ExportPrivKeyArmor. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmor(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmor), arg0, arg1) -} - -// ExportPrivKeyArmorByAddress mocks base method. -func (m *MockKeyring) ExportPrivKeyArmorByAddress(arg0 types0.Address, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmorByAddress", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmorByAddress indicates an expected call of ExportPrivKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmorByAddress(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmorByAddress), arg0, arg1) -} - -// ExportPubKeyArmor mocks base method. -func (m *MockKeyring) ExportPubKeyArmor(arg0 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmor", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmor indicates an expected call of ExportPubKeyArmor. -func (mr *MockKeyringMockRecorder) ExportPubKeyArmor(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmor), arg0) -} - -// ExportPubKeyArmorByAddress mocks base method. 
-func (m *MockKeyring) ExportPubKeyArmorByAddress(arg0 types0.Address) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmorByAddress", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmorByAddress indicates an expected call of ExportPubKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPubKeyArmorByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmorByAddress), arg0) -} - -// ImportPrivKey mocks base method. -func (m *MockKeyring) ImportPrivKey(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKey", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKey indicates an expected call of ImportPrivKey. -func (mr *MockKeyringMockRecorder) ImportPrivKey(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKey", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKey), arg0, arg1, arg2) -} - -// ImportPrivKeyHex mocks base method. -func (m *MockKeyring) ImportPrivKeyHex(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKeyHex", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKeyHex indicates an expected call of ImportPrivKeyHex. -func (mr *MockKeyringMockRecorder) ImportPrivKeyHex(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKeyHex", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKeyHex), arg0, arg1, arg2) -} - -// ImportPubKey mocks base method. 
-func (m *MockKeyring) ImportPubKey(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPubKey", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPubKey indicates an expected call of ImportPubKey. -func (mr *MockKeyringMockRecorder) ImportPubKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPubKey", reflect.TypeOf((*MockKeyring)(nil).ImportPubKey), arg0, arg1) -} - -// Key mocks base method. -func (m *MockKeyring) Key(arg0 string) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Key", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Key indicates an expected call of Key. -func (mr *MockKeyringMockRecorder) Key(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockKeyring)(nil).Key), arg0) -} - -// KeyByAddress mocks base method. -func (m *MockKeyring) KeyByAddress(arg0 types0.Address) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "KeyByAddress", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// KeyByAddress indicates an expected call of KeyByAddress. -func (mr *MockKeyringMockRecorder) KeyByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeyByAddress", reflect.TypeOf((*MockKeyring)(nil).KeyByAddress), arg0) -} - -// List mocks base method. -func (m *MockKeyring) List() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// List indicates an expected call of List. 
-func (mr *MockKeyringMockRecorder) List() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKeyring)(nil).List)) -} - -// MigrateAll mocks base method. -func (m *MockKeyring) MigrateAll() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MigrateAll") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// MigrateAll indicates an expected call of MigrateAll. -func (mr *MockKeyringMockRecorder) MigrateAll() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateAll", reflect.TypeOf((*MockKeyring)(nil).MigrateAll)) -} - -// NewAccount mocks base method. -func (m *MockKeyring) NewAccount(arg0, arg1, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAccount", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAccount indicates an expected call of NewAccount. -func (mr *MockKeyringMockRecorder) NewAccount(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAccount", reflect.TypeOf((*MockKeyring)(nil).NewAccount), arg0, arg1, arg2, arg3, arg4) -} - -// NewMnemonic mocks base method. -func (m *MockKeyring) NewMnemonic(arg0 string, arg1 keyring.Language, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewMnemonic", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// NewMnemonic indicates an expected call of NewMnemonic. 
-func (mr *MockKeyringMockRecorder) NewMnemonic(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMnemonic", reflect.TypeOf((*MockKeyring)(nil).NewMnemonic), arg0, arg1, arg2, arg3, arg4) -} - -// Rename mocks base method. -func (m *MockKeyring) Rename(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Rename", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Rename indicates an expected call of Rename. -func (mr *MockKeyringMockRecorder) Rename(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rename", reflect.TypeOf((*MockKeyring)(nil).Rename), arg0, arg1) -} - -// SaveLedgerKey mocks base method. -func (m *MockKeyring) SaveLedgerKey(arg0 string, arg1 keyring.SignatureAlgo, arg2 string, arg3, arg4, arg5 uint32) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveLedgerKey", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveLedgerKey indicates an expected call of SaveLedgerKey. -func (mr *MockKeyringMockRecorder) SaveLedgerKey(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLedgerKey", reflect.TypeOf((*MockKeyring)(nil).SaveLedgerKey), arg0, arg1, arg2, arg3, arg4, arg5) -} - -// SaveMultisig mocks base method. -func (m *MockKeyring) SaveMultisig(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveMultisig", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveMultisig indicates an expected call of SaveMultisig. 
-func (mr *MockKeyringMockRecorder) SaveMultisig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveMultisig", reflect.TypeOf((*MockKeyring)(nil).SaveMultisig), arg0, arg1) -} - -// SaveOfflineKey mocks base method. -func (m *MockKeyring) SaveOfflineKey(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveOfflineKey", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveOfflineKey indicates an expected call of SaveOfflineKey. -func (mr *MockKeyringMockRecorder) SaveOfflineKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveOfflineKey", reflect.TypeOf((*MockKeyring)(nil).SaveOfflineKey), arg0, arg1) -} - -// Sign mocks base method. -func (m *MockKeyring) Sign(arg0 string, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sign", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Sign indicates an expected call of Sign. -func (mr *MockKeyringMockRecorder) Sign(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockKeyring)(nil).Sign), arg0, arg1, arg2) -} - -// SignByAddress mocks base method. -func (m *MockKeyring) SignByAddress(arg0 types0.Address, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SignByAddress", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// SignByAddress indicates an expected call of SignByAddress. 
-func (mr *MockKeyringMockRecorder) SignByAddress(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignByAddress", reflect.TypeOf((*MockKeyring)(nil).SignByAddress), arg0, arg1, arg2) -} - -// SupportedAlgorithms mocks base method. -func (m *MockKeyring) SupportedAlgorithms() (keyring.SigningAlgoList, keyring.SigningAlgoList) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SupportedAlgorithms") - ret0, _ := ret[0].(keyring.SigningAlgoList) - ret1, _ := ret[1].(keyring.SigningAlgoList) - return ret0, ret1 -} - -// SupportedAlgorithms indicates an expected call of SupportedAlgorithms. -func (mr *MockKeyringMockRecorder) SupportedAlgorithms() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportedAlgorithms", reflect.TypeOf((*MockKeyring)(nil).SupportedAlgorithms)) -} diff --git a/supernode/node/supernode/server/server_test.go b/supernode/node/supernode/server/server_test.go deleted file mode 100644 index 7803bcce..00000000 --- a/supernode/node/supernode/server/server_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package server - -import ( - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/stretchr/testify/assert" - gomock "go.uber.org/mock/gomock" - - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" -) - -// --- Mock service implementing server.service --- -type mockService struct{} - -func (m *mockService) Desc() *grpc.ServiceDesc { - return &grpc.ServiceDesc{ - ServiceName: "test.Service", - HandlerType: (*interface{})(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - } -} - -func TestNewServer_WithValidConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, err := New(cfg, "supernode-test", 
mockKeyring, mockLumeraClient, &mockService{}) - assert.NoError(t, err) - assert.NotNil(t, s) -} - -func TestNewServer_WithNilConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - s, err := New(nil, "supernode-test", mockKeyring, mockLumeraClient) - assert.Nil(t, s) - assert.EqualError(t, err, "config is nil") -} - -func TestSetServiceStatusAndClose(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, _ := New(cfg, "test", mockKeyring, mockLumeraClient, &mockService{}) - _ = s.setupGRPCServer() - - s.SetServiceStatus("test.Service", grpc_health_v1.HealthCheckResponse_SERVING) - s.Close() - - // No assertion — success is no panic / crash on shutdown -} diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go deleted file mode 100644 index 9f94fac5..00000000 --- a/supernode/node/supernode/server/status_server.go +++ /dev/null @@ -1,207 +0,0 @@ -package server - -import ( - "context" - - "google.golang.org/grpc" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// SupernodeServer implements the SupernodeService gRPC service -type SupernodeServer struct { - pb.UnimplementedSupernodeServiceServer - statusService *supernode.SupernodeStatusService - services []ServiceInfo // Store service descriptors -} - -// ServiceInfo holds information about a registered service -type ServiceInfo struct { - Name string - Methods []string -} - -// NewSupernodeServer creates a new SupernodeServer -func NewSupernodeServer(statusService *supernode.SupernodeStatusService) *SupernodeServer { - return &SupernodeServer{ - statusService: statusService, - services: 
[]ServiceInfo{}, - } -} - -// RegisterService adds a service to the known services list -func (s *SupernodeServer) RegisterService(serviceName string, desc *grpc.ServiceDesc) { - methods := make([]string, 0, len(desc.Methods)+len(desc.Streams)) - - // Add unary methods - for _, method := range desc.Methods { - methods = append(methods, method.MethodName) - } - - // Add streaming methods - for _, stream := range desc.Streams { - methods = append(methods, stream.StreamName) - } - - s.services = append(s.services, ServiceInfo{ - Name: serviceName, - Methods: methods, - }) -} - -// GetStatus implements SupernodeService.GetStatus -func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { - // Get status from the common service; gate P2P metrics by request flag - status, err := s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) - if err != nil { - return nil, err - } - - // Convert to protobuf response - response := &pb.StatusResponse{ - Version: status.Version, - UptimeSeconds: status.UptimeSeconds, - Resources: &pb.StatusResponse_Resources{ - Cpu: &pb.StatusResponse_Resources_CPU{ - UsagePercent: status.Resources.CPU.UsagePercent, - Cores: status.Resources.CPU.Cores, - }, - Memory: &pb.StatusResponse_Resources_Memory{ - TotalGb: status.Resources.Memory.TotalGB, - UsedGb: status.Resources.Memory.UsedGB, - AvailableGb: status.Resources.Memory.AvailableGB, - UsagePercent: status.Resources.Memory.UsagePercent, - }, - StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), - HardwareSummary: status.Resources.HardwareSummary, - }, - RunningTasks: make([]*pb.StatusResponse_ServiceTasks, 0, len(status.RunningTasks)), - RegisteredServices: status.RegisteredServices, - Network: &pb.StatusResponse_Network{ - PeersCount: status.Network.PeersCount, - PeerAddresses: status.Network.PeerAddresses, - }, - Rank: status.Rank, - IpAddress: status.IPAddress, - } - - // Convert storage 
information - for _, storage := range status.Resources.Storage { - storageInfo := &pb.StatusResponse_Resources_Storage{ - Path: storage.Path, - TotalBytes: storage.TotalBytes, - UsedBytes: storage.UsedBytes, - AvailableBytes: storage.AvailableBytes, - UsagePercent: storage.UsagePercent, - } - response.Resources.StorageVolumes = append(response.Resources.StorageVolumes, storageInfo) - } - - // Convert service tasks - for _, service := range status.RunningTasks { - serviceTask := &pb.StatusResponse_ServiceTasks{ - ServiceName: service.ServiceName, - TaskIds: service.TaskIDs, - TaskCount: service.TaskCount, - } - response.RunningTasks = append(response.RunningTasks, serviceTask) - } - - // Map optional P2P metrics - if req.GetIncludeP2PMetrics() { - pm := status.P2PMetrics - pbdht := &pb.StatusResponse_P2PMetrics_DhtMetrics{} - for _, p := range pm.DhtMetrics.StoreSuccessRecent { - pbdht.StoreSuccessRecent = append(pbdht.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{ - TimeUnix: p.TimeUnix, - Requests: p.Requests, - Successful: p.Successful, - SuccessRate: p.SuccessRate, - }) - } - for _, p := range pm.DhtMetrics.BatchRetrieveRecent { - pbdht.BatchRetrieveRecent = append(pbdht.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{ - TimeUnix: p.TimeUnix, - Keys: p.Keys, - Required: p.Required, - FoundLocal: p.FoundLocal, - FoundNetwork: p.FoundNetwork, - DurationMs: p.DurationMS, - }) - } - pbdht.HotPathBannedSkips = pm.DhtMetrics.HotPathBannedSkips - pbdht.HotPathBanIncrements = pm.DhtMetrics.HotPathBanIncrements - - pbpm := &pb.StatusResponse_P2PMetrics{ - DhtMetrics: pbdht, - NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, - Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, - Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, - } - - // Network handle metrics - for k, v := 
range pm.NetworkHandleMetrics { - pbpm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{ - Total: v.Total, - Success: v.Success, - Failure: v.Failure, - Timeout: v.Timeout, - } - } - // Conn pool metrics - for k, v := range pm.ConnPoolMetrics { - pbpm.ConnPoolMetrics[k] = v - } - // Ban list - for _, b := range pm.BanList { - pbpm.BanList = append(pbpm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{ - Id: b.ID, - Ip: b.IP, - Port: b.Port, - Count: b.Count, - CreatedAtUnix: b.CreatedAtUnix, - AgeSeconds: b.AgeSeconds, - }) - } - // Database - pbpm.Database.P2PDbSizeMb = pm.Database.P2PDBSizeMB - pbpm.Database.P2PDbRecordsCount = pm.Database.P2PDBRecordsCount - // Disk - pbpm.Disk.AllMb = pm.Disk.AllMB - pbpm.Disk.UsedMb = pm.Disk.UsedMB - pbpm.Disk.FreeMb = pm.Disk.FreeMB - - // Detailed recent per-request lists removed from API - - response.P2PMetrics = pbpm - } - - // Codec configuration removed - - return response, nil -} - -// ListServices implements SupernodeService.ListServices -func (s *SupernodeServer) ListServices(ctx context.Context, req *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { - // Convert internal ServiceInfo to protobuf ServiceInfo - services := make([]*pb.ServiceInfo, 0, len(s.services)) - for _, svc := range s.services { - services = append(services, &pb.ServiceInfo{ - Name: svc.Name, - Methods: svc.Methods, - }) - } - - return &pb.ListServicesResponse{ - Services: services, - Count: int32(len(services)), - }, nil -} - -// Desc implements the service interface for gRPC service registration -func (s *SupernodeServer) Desc() *grpc.ServiceDesc { - return &pb.SupernodeService_ServiceDesc -} diff --git a/supernode/node/supernode/server/status_server_test.go b/supernode/node/supernode/server/status_server_test.go deleted file mode 100644 index 251cfd8d..00000000 --- a/supernode/node/supernode/server/status_server_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package server - -import ( - "context" - "testing" - - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -func TestSupernodeServer_GetStatus(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with empty service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Check basic structure - assert.NotNil(t, resp.Resources) - assert.NotNil(t, resp.Resources.Cpu) - assert.NotNil(t, resp.Resources.Memory) - assert.NotNil(t, resp.RunningTasks) - assert.NotNil(t, resp.RegisteredServices) - - // Check version field - assert.NotEmpty(t, resp.Version) - - // Check uptime field - assert.True(t, resp.UptimeSeconds >= 0) - - // Check CPU metrics - assert.True(t, resp.Resources.Cpu.UsagePercent >= 0) - assert.True(t, resp.Resources.Cpu.UsagePercent <= 100) - assert.True(t, resp.Resources.Cpu.Cores >= 0) - - // Check Memory metrics (now in GB) - assert.True(t, resp.Resources.Memory.TotalGb > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // Check hardware summary - if resp.Resources.Cpu.Cores > 0 && resp.Resources.Memory.TotalGb > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Check Storage (should have default root filesystem) - assert.NotEmpty(t, resp.Resources.StorageVolumes) - assert.Equal(t, "/", resp.Resources.StorageVolumes[0].Path) - - // Should have no services initially - assert.Empty(t, resp.RunningTasks) - assert.Empty(t, resp.RegisteredServices) - - // Check new fields have default values - assert.NotNil(t, resp.Network) - assert.Equal(t, int32(0), 
resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IpAddress) -} - -func TestSupernodeServer_GetStatusWithService(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Add a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2"}, - } - statusService.RegisterTaskProvider(mockProvider) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - // Check service details - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(2), service.TaskCount) - assert.Equal(t, []string{"task1", "task2"}, service.TaskIds) -} - -func TestSupernodeServer_Desc(t *testing.T) { - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - server := NewSupernodeServer(statusService) - - desc := server.Desc() - assert.NotNil(t, desc) - assert.Equal(t, "supernode.SupernodeService", desc.ServiceName) -} diff --git a/supernode/services/cascade/adaptors/lumera.go b/supernode/services/cascade/adaptors/lumera.go deleted file mode 100644 index f5e3b52e..00000000 --- a/supernode/services/cascade/adaptors/lumera.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - "fmt" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" -) - -//go:generate mockgen 
-destination=mocks/lumera_mock.go -package=cascadeadaptormocks -source=lumera.go - -// LumeraClient defines the interface for interacting with Lumera chain data during cascade registration. -type LumeraClient interface { - // SupernodeModule - GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) - - // Action Module - GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) - FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) - SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) - GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) - // Auth - Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error -} - -// Client is the concrete implementation used in production. -type Client struct { - lc lumera.Client -} - -func NewLumeraClient(client lumera.Client) LumeraClient { - return &Client{ - lc: client, - } -} - -func (c *Client) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { - return c.lc.Action().GetAction(ctx, actionID) -} - -func (c *Client) GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) { - return c.lc.Action().GetActionFee(ctx, dataSize) -} - -func (c *Client) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { - resp, err := c.lc.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) - if err != nil { - // Preserve underlying gRPC status/details - return nil, fmt.Errorf("finalize cascade action broadcast failed: %w", err) - } - - // Surface chain-level failures (non-zero code) with rich context - if resp != nil && resp.TxResponse != nil && resp.TxResponse.Code != 0 { - return nil, fmt.Errorf( - "tx failed: code=%d codespace=%s height=%d 
gas_wanted=%d gas_used=%d raw_log=%s", - resp.TxResponse.Code, - resp.TxResponse.Codespace, - resp.TxResponse.Height, - resp.TxResponse.GasWanted, - resp.TxResponse.GasUsed, - resp.TxResponse.RawLog, - ) - } - - return resp, nil -} - -func (c *Client) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { - return c.lc.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) -} - -func (c *Client) GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { - return c.lc.SuperNode().GetTopSuperNodesForBlock(ctx, height) -} - -func (c *Client) Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error { - return c.lc.Auth().Verify(ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/lumera_mock.go b/supernode/services/cascade/adaptors/mocks/lumera_mock.go deleted file mode 100644 index 29cdd48f..00000000 --- a/supernode/services/cascade/adaptors/mocks/lumera_mock.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: lumera.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - types "github.com/LumeraProtocol/lumera/x/action/v1/types" - types0 "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - tx "github.com/cosmos/cosmos-sdk/types/tx" - gomock "github.com/golang/mock/gomock" -) - -// MockLumeraClient is a mock of LumeraClient interface. -type MockLumeraClient struct { - ctrl *gomock.Controller - recorder *MockLumeraClientMockRecorder -} - -// MockLumeraClientMockRecorder is the mock recorder for MockLumeraClient. -type MockLumeraClientMockRecorder struct { - mock *MockLumeraClient -} - -// NewMockLumeraClient creates a new mock instance. 
-func NewMockLumeraClient(ctrl *gomock.Controller) *MockLumeraClient { - mock := &MockLumeraClient{ctrl: ctrl} - mock.recorder = &MockLumeraClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLumeraClient) EXPECT() *MockLumeraClientMockRecorder { - return m.recorder -} - -// FinalizeAction mocks base method. -func (m *MockLumeraClient) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.BroadcastTxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.BroadcastTxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FinalizeAction indicates an expected call of FinalizeAction. -func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).FinalizeAction), ctx, actionID, rqids) -} - -// GetAction mocks base method. -func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAction", ctx, actionID) - ret0, _ := ret[0].(*types.QueryGetActionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAction indicates an expected call of GetAction. -func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockLumeraClient)(nil).GetAction), ctx, actionID) -} - -// GetActionFee mocks base method. 
-func (m *MockLumeraClient) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetActionFee", ctx, dataSize) - ret0, _ := ret[0].(*types.QueryGetActionFeeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetActionFee indicates an expected call of GetActionFee. -func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockLumeraClient)(nil).GetActionFee), ctx, dataSize) -} - -// GetTopSupernodes mocks base method. -func (m *MockLumeraClient) GetTopSupernodes(ctx context.Context, height uint64) (*types0.QueryGetTopSuperNodesForBlockResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTopSupernodes", ctx, height) - ret0, _ := ret[0].(*types0.QueryGetTopSuperNodesForBlockResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTopSupernodes indicates an expected call of GetTopSupernodes. -func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSupernodes", reflect.TypeOf((*MockLumeraClient)(nil).GetTopSupernodes), ctx, height) -} - -// SimulateFinalizeAction mocks base method. -func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.SimulateResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction. 
-func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids) -} - -// Verify mocks base method. -func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sigBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Verify", ctx, creator, file, sigBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// Verify indicates an expected call of Verify. -func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockLumeraClient)(nil).Verify), ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go deleted file mode 100644 index ec99d92a..00000000 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: p2p.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - logtrace "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockP2PService is a mock of P2PService interface. -type MockP2PService struct { - ctrl *gomock.Controller - recorder *MockP2PServiceMockRecorder -} - -// MockP2PServiceMockRecorder is the mock recorder for MockP2PService. -type MockP2PServiceMockRecorder struct { - mock *MockP2PService -} - -// NewMockP2PService creates a new mock instance. 
-func NewMockP2PService(ctrl *gomock.Controller) *MockP2PService { - mock := &MockP2PService{ctrl: ctrl} - mock.recorder = &MockP2PServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockP2PService) EXPECT() *MockP2PServiceMockRecorder { - return m.recorder -} - -// StoreArtefacts mocks base method. -func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) - ret0, _ := ret[0].(error) - return ret0 -} - -// StoreArtefacts indicates an expected call of StoreArtefacts. -func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreArtefacts", reflect.TypeOf((*MockP2PService)(nil).StoreArtefacts), ctx, req, f) -} diff --git a/supernode/services/cascade/adaptors/mocks/rq_mock.go b/supernode/services/cascade/adaptors/mocks/rq_mock.go deleted file mode 100644 index f45f2eb5..00000000 --- a/supernode/services/cascade/adaptors/mocks/rq_mock.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: rq.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - codec "github.com/LumeraProtocol/supernode/v2/pkg/codec" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockCodecService is a mock of CodecService interface. -type MockCodecService struct { - ctrl *gomock.Controller - recorder *MockCodecServiceMockRecorder -} - -// MockCodecServiceMockRecorder is the mock recorder for MockCodecService. -type MockCodecServiceMockRecorder struct { - mock *MockCodecService -} - -// NewMockCodecService creates a new mock instance. 
-func NewMockCodecService(ctrl *gomock.Controller) *MockCodecService { - mock := &MockCodecService{ctrl: ctrl} - mock.recorder = &MockCodecServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodecService) EXPECT() *MockCodecServiceMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodecService) Decode(ctx context.Context, req adaptors.DecodeRequest) (adaptors.DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(adaptors.DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. -func (mr *MockCodecServiceMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodecService)(nil).Decode), ctx, req) -} - -// EncodeInput mocks base method. -func (m *MockCodecService) EncodeInput(ctx context.Context, taskID, path string, dataSize int) (adaptors.EncodeResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EncodeInput", ctx, taskID, path, dataSize) - ret0, _ := ret[0].(adaptors.EncodeResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// EncodeInput indicates an expected call of EncodeInput. -func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeInput", reflect.TypeOf((*MockCodecService)(nil).EncodeInput), ctx, taskID, path, dataSize) -} - -// PrepareDecode mocks base method. 
-func (m *MockCodecService) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) ([]string, func(int, string, []byte) (string, error), func() error, *codec.Workspace, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrepareDecode", ctx, actionID, layout) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(func(int, string, []byte) (string, error)) - ret2, _ := ret[2].(func() error) - ret3, _ := ret[3].(*codec.Workspace) - ret4, _ := ret[4].(error) - return ret0, ret1, ret2, ret3, ret4 -} - -// PrepareDecode indicates an expected call of PrepareDecode. -func (mr *MockCodecServiceMockRecorder) PrepareDecode(ctx, actionID, layout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDecode", reflect.TypeOf((*MockCodecService)(nil).PrepareDecode), ctx, actionID, layout) -} diff --git a/supernode/services/cascade/adaptors/rq.go b/supernode/services/cascade/adaptors/rq.go deleted file mode 100644 index 92e89819..00000000 --- a/supernode/services/cascade/adaptors/rq.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" -) - -// CodecService defines the interface for RaptorQ encoding of input data. -// -//go:generate mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go -type CodecService interface { - EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) - PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) - Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) -} - -// EncodeResult represents the outcome of encoding the input data. 
-type EncodeResult struct { - SymbolsDir string - Metadata codec.Layout -} - -// codecImpl is the default implementation using the real codec service. -type codecImpl struct { - codec codec.Codec -} - -// NewCodecService creates a new production instance of CodecService. -func NewCodecService(codec codec.Codec) CodecService { - return &codecImpl{codec: codec} -} - -// EncodeInput encodes the provided data and returns symbols and metadata. -func (c *codecImpl) EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) { - resp, err := c.codec.Encode(ctx, codec.EncodeRequest{ - TaskID: taskID, - Path: path, - DataSize: dataSize, - }) - if err != nil { - return EncodeResult{}, err - } - - return EncodeResult{ - SymbolsDir: resp.SymbolsDir, - Metadata: resp.Metadata, - }, nil -} - -type DecodeRequest struct { - Symbols map[string][]byte - Layout codec.Layout - ActionID string -} - -type DecodeResponse struct { - DecodeTmpDir string - FilePath string -} - -// Decode decodes the provided symbols and returns the original file -func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - resp, err := c.codec.Decode(ctx, codec.DecodeRequest{ - Symbols: req.Symbols, - Layout: req.Layout, - ActionID: req.ActionID, - }) - if err != nil { - return DecodeResponse{}, err - } - - return DecodeResponse{ - FilePath: resp.FilePath, - DecodeTmpDir: resp.DecodeTmpDir, - }, nil -} - -func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) { - return -} diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go deleted file mode 100644 index 7a0f1ef2..00000000 --- a/supernode/services/cascade/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package cascade - -import ( - 
"github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// Config contains settings for the cascade service -type Config struct { - common.Config `mapstructure:",squash" json:"-"` - - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` -} diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go deleted file mode 100644 index 3cce953c..00000000 --- a/supernode/services/cascade/download.go +++ /dev/null @@ -1,375 +0,0 @@ -package cascade - -import ( - "context" - "encoding/json" - "fmt" - "os" - "sort" - "time" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/crypto" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -const targetRequiredPercent = 17 - -type DownloadRequest struct { - ActionID string - // Signature is required for private downloads. For public cascade - // actions (metadata.Public == true), this is ignored. - Signature string -} - -type DownloadResponse struct { - EventType SupernodeEventType - Message string - FilePath string - DownloadedDir string -} - -// Download retrieves a cascade artefact by action ID. -// -// Authorization behavior: -// - If the cascade metadata has Public = true, signature verification is skipped -// and the file is downloadable by anyone. -// - If Public = false, a valid download signature is required. 
-func (task *CascadeRegistrationTask) Download( - ctx context.Context, - req *DownloadRequest, - send func(resp *DownloadResponse) error, -) (err error) { - // Seed correlation ID and origin from actionID for downstream logs - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "download") - } - fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "download: request", fields) - - // Ensure task status is finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) - if err != nil { - // Ensure error is logged as string for consistency - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to get action", err, fields) - } - logtrace.Info(ctx, "download: action fetched", fields) - task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) - - if actionDetails.GetAction().State != actiontypes.ActionStateDone { - // Return a clearer error message when action is not yet finalized - err = errors.New("action is not in a valid state") - fields[logtrace.FieldError] = "action state is not done yet" - fields[logtrace.FieldActionState] = actionDetails.GetAction().State - return task.wrapErr(ctx, "action not finalized yet", err, fields) - } - logtrace.Info(ctx, "download: action state ok", fields) - - metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) - } - logtrace.Info(ctx, "download: metadata decoded", fields) - task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", 
send) - - // Enforce download authorization based on metadata.Public - // - If public: skip signature verification; allow anonymous downloads - // - If private: require a valid signature - if !metadata.Public { - if req.Signature == "" { - fields[logtrace.FieldError] = "missing signature for private download" - // Provide a descriptive message without a fabricated root error - return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) - } - if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to verify download signature", err, fields) - } - logtrace.Info(ctx, "download: signature verified", fields) - } else { - logtrace.Info(ctx, "download: public cascade (no signature)", fields) - } - - // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - - logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) - filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) - if err != nil { - fields[logtrace.FieldError] = err.Error() - // Ensure temporary decode directory is cleaned if decode failed after being created - if tmpDir != "" { - if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { - logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) - } - } - return task.wrapErr(ctx, "failed to download artifacts", err, fields) - } - logtrace.Debug(ctx, "File reconstructed and hash verified", fields) - // Notify: decode completed, file ready on disk - task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) - - return nil -} - -func (task *CascadeRegistrationTask) downloadArtifacts(ctx 
context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Debug(ctx, "started downloading the artifacts", fields) - - var ( - layout codec.Layout - layoutFetchMS int64 - layoutDecodeMS int64 - layoutAttempts int - ) - - for _, indexID := range metadata.RqIdsIds { - iStart := time.Now() - logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) - indexFile, err := task.P2PClient.Retrieve(ctx, indexID) - if err != nil || len(indexFile) == 0 { - logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) - continue - } - logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) - - // Parse index file to get layout IDs - indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) - if err != nil { - logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) - continue - } - - // Try to retrieve layout files using layout IDs from index file - var netMS, decMS int64 - layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) - if err != nil { - logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": layoutAttempts}) - continue - } - layoutFetchMS = netMS - layoutDecodeMS = decMS - - if len(layout.Blocks) > 0 { - logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) - break - } - } - - if len(layout.Blocks) == 0 { - return "", "", errors.New("no symbols found in RQ metadata") - } - // Persist layout timing in fields for downstream metrics - 
fields["layout_fetch_ms"] = layoutFetchMS - fields["layout_decode_ms"] = layoutDecodeMS - fields["layout_attempts"] = layoutAttempts - return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) -} - -// restoreFileFromLayout reconstructs the original file from the provided layout -// and a subset of retrieved symbols. The method deduplicates symbol identifiers -// before network retrieval to avoid redundant requests and ensure the requested -// count reflects unique symbols only. -func (task *CascadeRegistrationTask) restoreFileFromLayout( - ctx context.Context, - layout codec.Layout, - dataHash string, - actionID string, - send func(resp *DownloadResponse) error, -) (string, string, error) { - - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - } - // Deduplicate symbols across blocks to avoid redundant requests - symSet := make(map[string]struct{}) - for _, block := range layout.Blocks { - for _, s := range block.Symbols { - symSet[s] = struct{}{} - } - } - allSymbols := make([]string, 0, len(symSet)) - for s := range symSet { - allSymbols = append(allSymbols, s) - } - sort.Strings(allSymbols) - - totalSymbols := len(allSymbols) - fields["totalSymbols"] = totalSymbols - // Compute target requirement (reporting only; does not change behavior) - targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 - if targetRequiredCount < 1 && totalSymbols > 0 { - targetRequiredCount = 1 - } - logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) - - // Measure symbols batch retrieve duration - retrieveStart := time.Now() - // Use context as-is; metrics task tagging removed - // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy - // The DHT will short-circuit once it finds the required number across the provided keys - reqCount := targetRequiredCount - if reqCount 
> totalSymbols { - reqCount = totalSymbols - } - rStart := time.Now() - logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) - symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "batch retrieve failed", fields) - return "", "", fmt.Errorf("batch retrieve symbols: %w", err) - } - retrieveMS := time.Since(retrieveStart).Milliseconds() - logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) - - // Measure decode duration - decodeStart := time.Now() - dStart := time.Now() - logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) - decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ - ActionID: actionID, - Symbols: symbols, - Layout: layout, - }) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "decode failed", fields) - return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) - } - decodeMS := time.Since(decodeStart).Milliseconds() - logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) - - // Emit minimal JSON payload (metrics system removed) - minPayload := map[string]any{ - "retrieve": map[string]any{ - "retrieve_ms": retrieveMS, - "decode_ms": decodeMS, - "target_required_percent": targetRequiredPercent, - "target_required_count": targetRequiredCount, - "total_symbols": totalSymbols, - }, - } - if b, err := json.MarshalIndent(minPayload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } - - fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) - if err 
!= nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to hash file", fields) - return "", "", fmt.Errorf("hash file: %w", err) - } - if fileHash == nil { - fields[logtrace.FieldError] = "file hash is nil" - logtrace.Error(ctx, "failed to hash file", fields) - return "", "", errors.New("file hash is nil") - } - - err = cascadekit.VerifyB64DataHash(fileHash, dataHash) - if err != nil { - logtrace.Error(ctx, "failed to verify hash", fields) - fields[logtrace.FieldError] = err.Error() - return "", decodeInfo.DecodeTmpDir, err - } - // Preserve original debug log for successful hash match - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) - // Log the state of the temporary decode directory - if decodeInfo.DecodeTmpDir != "" { - if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { - if left := len(set); left > 0 { - logtrace.Debug(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left}) - } else { - logtrace.Debug(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir}) - } - } - } - logtrace.Info(ctx, "download: file verified", fields) - - return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil -} - -func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg string, filePath string, tmpDir string, send func(resp *DownloadResponse) error) { - _ = send(&DownloadResponse{ - EventType: eventType, - Message: msg, - FilePath: filePath, - DownloadedDir: tmpDir, - }) -} - -// parseIndexFile parses compressed index file to extract IndexFile structure -// parseIndexFile moved to cascadekit.ParseCompressedIndexFile - -// retrieveLayoutFromIndex retrieves layout file using layout IDs from index file -func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, 
error) { - // Try to retrieve layout files using layout IDs from index file - var ( - totalFetchMS int64 - totalDecodeMS int64 - attempts int - ) - for _, layoutID := range indexData.LayoutIDs { - attempts++ - t0 := time.Now() - logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts}) - layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) - took := time.Since(t0).Milliseconds() - totalFetchMS += took - if err != nil || len(layoutFile) == 0 { - logtrace.Warn(ctx, "Retrieve layout file failed or empty", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "ms": took, logtrace.FieldError: fmt.Sprintf("%v", err)}) - continue - } - - t1 := time.Now() - layout, _, _, err := cascadekit.ParseRQMetadataFile(layoutFile) - decMS := time.Since(t1).Milliseconds() - totalDecodeMS += decMS - if err != nil { - logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "decode_ms": decMS, logtrace.FieldError: err.Error()}) - continue - } - - if len(layout.Blocks) > 0 { - logtrace.Debug(ctx, "Layout file retrieved and parsed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS}) - return layout, totalFetchMS, totalDecodeMS, attempts, nil - } - } - - return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index") -} - -// CleanupDownload removes the temporary directory created during decode. -// The parameter is a directory path (not an action ID). 
-func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, dirPath string) error { - if dirPath == "" { - return errors.New("directory path is empty") - } - - // For now, we use tmp directory path as provided by decoder - logtrace.Debug(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath}) - if err := os.RemoveAll(dirPath); err != nil { - logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": dirPath, logtrace.FieldError: err.Error()}) - return errors.Errorf("failed to delete download directory: %s, :%s", dirPath, err.Error()) - } - logtrace.Debug(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath}) - - return nil -} diff --git a/supernode/services/cascade/events_test.go b/supernode/services/cascade/events_test.go deleted file mode 100644 index ddf98871..00000000 --- a/supernode/services/cascade/events_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package cascade - -import ( - "testing" -) - -func TestSupernodeEventTypeValues(t *testing.T) { - tests := []struct { - name string - value SupernodeEventType - expected int - }{ - {"UNKNOWN", SupernodeEventTypeUNKNOWN, 0}, - {"ActionRetrieved", SupernodeEventTypeActionRetrieved, 1}, - {"ActionFeeVerified", SupernodeEventTypeActionFeeVerified, 2}, - {"TopSupernodeCheckPassed", SupernodeEventTypeTopSupernodeCheckPassed, 3}, - {"MetadataDecoded", SupernodeEventTypeMetadataDecoded, 4}, - {"DataHashVerified", SupernodeEventTypeDataHashVerified, 5}, - {"InputEncoded", SupernodeEventTypeInputEncoded, 6}, - {"SignatureVerified", SupernodeEventTypeSignatureVerified, 7}, - {"RQIDsGenerated", SupernodeEventTypeRQIDsGenerated, 8}, - {"RqIDsVerified", SupernodeEventTypeRqIDsVerified, 9}, - {"FinalizeSimulated", SupernodeEventTypeFinalizeSimulated, 10}, - {"ArtefactsStored", SupernodeEventTypeArtefactsStored, 11}, - {"ActionFinalized", SupernodeEventTypeActionFinalized, 12}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if 
int(tt.value) != tt.expected { - t.Errorf("Expected %s to be %d, got %d", tt.name, tt.expected, tt.value) - } - }) - } -} diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go deleted file mode 100644 index 4084243a..00000000 --- a/supernode/services/cascade/helper.go +++ /dev/null @@ -1,293 +0,0 @@ -package cascade - -import ( - "context" - "encoding/base64" - "fmt" - "strconv" - - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - - sdk "github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// layout stats helpers removed to keep download metrics minimal. 
- -func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { - res, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return nil, task.wrapErr(ctx, "failed to get action", err, f) - } - - if res.GetAction().ActionID == "" { - return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) - } - logtrace.Debug(ctx, "action has been retrieved", f) - - return res.GetAction(), nil -} - -func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { - top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) - if err != nil { - return task.wrapErr(ctx, "failed to get top SNs", err, f) - } - logtrace.Debug(ctx, "Fetched Top Supernodes", f) - - if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { - // Build information about supernodes for better error context - addresses := make([]string, len(top.Supernodes)) - for i, sn := range top.Supernodes { - addresses[i] = sn.SupernodeAccount - } - logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{ - "currentAddress": task.config.SupernodeAccountAddress, - "topSupernodes": addresses, - }) - return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", - errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f) - } - - return nil -} - -// decodeCascadeMetadata moved to cascadekit.UnmarshalCascadeMetadata -// verifyDataHash moved to cascadekit.VerifyB64DataHash - -func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { - resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) - if err != nil { - return nil, task.wrapErr(ctx, "failed to encode data", err, f) - } - return &resp, nil -} - -func (task *CascadeRegistrationTask) 
verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, - encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { - - // Extract index file and creator signature from encoded data - // The signatures field contains: Base64(index_file).creators_signature - indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) - } - - // Verify creator signature on index file - creatorSigBytes, err := base64.StdEncoding.DecodeString(creatorSig) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) - } - - if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) - } - logtrace.Debug(ctx, "creator signature successfully verified", f) - - // Decode index file to get the layout signature - indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) - } - - // Verify layout signature on the actual layout - layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) - } - - layoutJSON, err := json.Marshal(encodedMeta) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) - } - layoutB64 := utils.B64Encode(layoutJSON) - if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) - } - logtrace.Debug(ctx, "layout signature successfully verified", f) - - return encodedMeta, 
indexFile.LayoutSignature, nil -} - -func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, - sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { - // The signatures field contains: Base64(index_file).creators_signature - // This full format will be used for ID generation to match chain expectations - - // Generate layout files (redundant metadata files) - layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate layout files", err, f) - } - - // Generate index files using full signatures format for ID generation (matches chain expectation) - indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate index files", err, f) - } - - // Store layout files and index files separately in P2P - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) - - // Return index IDs (sent to chain) and all files (stored in P2P) - return cascadekit.GenRQIdentifiersFilesResponse{ - RQIDs: indexIDs, - RedundantMetadataFiles: allFiles, - }, nil -} - -// storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the -// P2P adaptor. P2P does not return metrics; cascade summarizes and emits them. 
-func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { - if f == nil { - f = logtrace.Fields{} - } - lf := logtrace.Fields{ - logtrace.FieldActionID: actionID, - logtrace.FieldTaskID: task.ID(), - "id_files_count": len(idFiles), - "symbols_dir": symbolsDir, - } - for k, v := range f { - lf[k] = v - } - // Tag the flow as first-pass just before handing over to P2P - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - logtrace.Info(ctx, "store: first-pass begin", lf) - - if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f); err != nil { - // Log and wrap to ensure a proper error line and context - return task.wrapErr(ctx, "failed to store artefacts", err, lf) - } - return nil -} - -func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { - if err != nil { - f[logtrace.FieldError] = err.Error() - } - logtrace.Error(ctx, msg, f) - - // Preserve the root cause in the gRPC error description so callers receive full context. - if err != nil { - return status.Errorf(codes.Internal, "%s: %v", msg, err) - } - return status.Errorf(codes.Internal, "%s", msg) -} - -// emitArtefactsStored builds a single-line metrics summary and emits the -// SupernodeEventTypeArtefactsStored event while logging the metrics line. 
-func (task *CascadeRegistrationTask) emitArtefactsStored( - ctx context.Context, - fields logtrace.Fields, - _ codec.Layout, - send func(resp *RegisterResponse) error, -) { - if fields == nil { - fields = logtrace.Fields{} - } - - // Emit a minimal event message (metrics system removed) - msg := "Artefacts stored" - logtrace.Debug(ctx, "artefacts have been stored", fields) - task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) -} - -// Removed legacy helpers; functionality is centralized in cascadekit. - -// - -// verifyActionFee checks if the action fee is sufficient for the given data size -// It fetches action parameters, calculates the required fee, and compares it with the action price -func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { - dataSizeInKBs := dataSize / 1024 - fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) - if err != nil { - return task.wrapErr(ctx, "failed to get action fee", err, fields) - } - - // Parse fee amount from string to int64 - amount, err := strconv.ParseInt(fee.Amount, 10, 64) - if err != nil { - return task.wrapErr(ctx, "failed to parse fee amount", err, fields) - } - - // Calculate per-byte fee based on data size - requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) - - // Log the calculated fee - logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{ - "fee": requiredFee.String(), - "dataBytes": dataSize, - }) - // Check if action price is less than required fee - if action.Price.IsLT(requiredFee) { - return task.wrapErr( - ctx, - "insufficient fee", - fmt.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), - fields, - ) - } - - return nil -} - -// - -// - -// - -// VerifyDownloadSignature verifies a download signature where the signed payload -// is actionID (creator address not included in the payload) -func (task *CascadeRegistrationTask) 
VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - logtrace.FieldMethod: "VerifyDownloadSignature", - } - - // Get action details to extract creator address - actionDetails, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return task.wrapErr(ctx, "failed to get action", err, fields) - } - - creatorAddress := actionDetails.GetAction().Creator - fields["creator_address"] = creatorAddress - - // Create the expected signature data: actionID (creator address not included in payload) - signatureData := fmt.Sprintf("%s", actionID) - fields["signature_data"] = signatureData - - // Decode the base64 signature - signatureBytes, err := base64.StdEncoding.DecodeString(signature) - if err != nil { - return task.wrapErr(ctx, "failed to decode signature from base64", err, fields) - } - - // Verify the signature using Lumera client - if err := task.LumeraClient.Verify(ctx, creatorAddress, []byte(signatureData), signatureBytes); err != nil { - return task.wrapErr(ctx, "failed to verify download signature", err, fields) - } - - logtrace.Debug(ctx, "download signature successfully verified", fields) - return nil -} diff --git a/supernode/services/cascade/mocks/cascade_interfaces_mock.go b/supernode/services/cascade/mocks/cascade_interfaces_mock.go deleted file mode 100644 index 44d3189c..00000000 --- a/supernode/services/cascade/mocks/cascade_interfaces_mock.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go - -// Package cascademocks is a generated GoMock package. -package cascademocks - -import ( - context "context" - reflect "reflect" - - cascade "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - gomock "github.com/golang/mock/gomock" -) - -// MockCascadeServiceFactory is a mock of CascadeServiceFactory interface. 
-type MockCascadeServiceFactory struct { - ctrl *gomock.Controller - recorder *MockCascadeServiceFactoryMockRecorder -} - -// MockCascadeServiceFactoryMockRecorder is the mock recorder for MockCascadeServiceFactory. -type MockCascadeServiceFactoryMockRecorder struct { - mock *MockCascadeServiceFactory -} - -// NewMockCascadeServiceFactory creates a new mock instance. -func NewMockCascadeServiceFactory(ctrl *gomock.Controller) *MockCascadeServiceFactory { - mock := &MockCascadeServiceFactory{ctrl: ctrl} - mock.recorder = &MockCascadeServiceFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeServiceFactory) EXPECT() *MockCascadeServiceFactoryMockRecorder { - return m.recorder -} - -// NewCascadeRegistrationTask mocks base method. -func (m *MockCascadeServiceFactory) NewCascadeRegistrationTask() cascade.CascadeTask { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCascadeRegistrationTask") - ret0, _ := ret[0].(cascade.CascadeTask) - return ret0 -} - -// NewCascadeRegistrationTask indicates an expected call of NewCascadeRegistrationTask. -func (mr *MockCascadeServiceFactoryMockRecorder) NewCascadeRegistrationTask() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCascadeRegistrationTask", reflect.TypeOf((*MockCascadeServiceFactory)(nil).NewCascadeRegistrationTask)) -} - -// MockCascadeTask is a mock of CascadeTask interface. -type MockCascadeTask struct { - ctrl *gomock.Controller - recorder *MockCascadeTaskMockRecorder -} - -// MockCascadeTaskMockRecorder is the mock recorder for MockCascadeTask. -type MockCascadeTaskMockRecorder struct { - mock *MockCascadeTask -} - -// NewMockCascadeTask creates a new mock instance. 
-func NewMockCascadeTask(ctrl *gomock.Controller) *MockCascadeTask { - mock := &MockCascadeTask{ctrl: ctrl} - mock.recorder = &MockCascadeTaskMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeTask) EXPECT() *MockCascadeTaskMockRecorder { - return m.recorder -} - -// CleanupDownload mocks base method. -func (m *MockCascadeTask) CleanupDownload(ctx context.Context, actionID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CleanupDownload", ctx, actionID) - ret0, _ := ret[0].(error) - return ret0 -} - -// CleanupDownload indicates an expected call of CleanupDownload. -func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDownload", reflect.TypeOf((*MockCascadeTask)(nil).CleanupDownload), ctx, actionID) -} - -// Download mocks base method. -func (m *MockCascadeTask) Download(ctx context.Context, req *cascade.DownloadRequest, send func(*cascade.DownloadResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Download", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Download indicates an expected call of Download. -func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockCascadeTask)(nil).Download), ctx, req, send) -} - -// Register mocks base method. -func (m *MockCascadeTask) Register(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. 
-func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockCascadeTask)(nil).Register), ctx, req, send) -} diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go deleted file mode 100644 index 6f56791a..00000000 --- a/supernode/services/cascade/register_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package cascade_test - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "os" - "testing" - - sdkmath "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - codecpkg "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - sdk "github.com/cosmos/cosmos-sdk/types" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" - "github.com/cosmos/gogoproto/proto" - "lukechampine.com/blake3" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestCascadeRegistrationTask_Register(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Setup input file - tmpFile, err := os.CreateTemp("", "cascade-test-input") - assert.NoError(t, err) - - _, _ = tmpFile.WriteString("mock data") - - err = tmpFile.Close() // ✅ ensure it's flushed to disk - assert.NoError(t, err) - - rawHash, b64Hash := blake3HashRawAndBase64(t, tmpFile.Name()) - - tests := []struct { - name string - setupMocks func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) - 
expectedError string - expectedEvents int - }{ - { - name: "happy path", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - // 2. Top SNs - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - { - SupernodeAccount: "lumera1abcxyz", // must match task.config.SupernodeAccountAddress - }, - }, - }, nil) - - // 3. Signature verification - layout signature on layout file - // Expect two verification calls: creator signature and layout signature - lc.EXPECT(). - Verify(gomock.Any(), "creator1", gomock.Any(), gomock.Any()). - Return(nil). - Times(2) - - // 4. Simulate finalize should pass - lc.EXPECT(). - SimulateFinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.SimulateResponse{}, nil) - - // 5. Finalize - lc.EXPECT(). - FinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.BroadcastTxResponse{TxResponse: &sdk.TxResponse{TxHash: "tx123"}}, nil) - - // 6. Params (if used in fee check) - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - // 7. Encode input - codec.EXPECT(). - EncodeInput(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(adaptors.EncodeResult{ - SymbolsDir: "/tmp", - Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, - }, nil) - - // 8. Store artefacts (no metrics returned; recorded centrally) - p2p.EXPECT(). - StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). 
- Return(nil) - }, - expectedError: "", - expectedEvents: 12, - }, - { - name: "get-action fails", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, _ *cascadeadaptormocks.MockCodecService, _ *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(nil, assert.AnError) - }, - expectedError: "assert.AnError general error", - expectedEvents: 0, - }, - { - name: "invalid data hash mismatch", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata("some-other-hash", t), // ⛔ incorrect hash - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "lumera1abcxyz"}, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - }, - expectedError: "data hash doesn't match", - expectedEvents: 5, // up to metadata decoded - }, - { - name: "fee too low", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). 
- Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(50), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "100"}, nil) - - }, - expectedError: "action fee is too low", - expectedEvents: 2, // until fee check - }, - { - name: "supernode not in top list", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). 
- Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "other-supernode"}, - }, - }, nil) - }, - expectedError: "not eligible supernode", - expectedEvents: 2, // fails after fee verified - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - - tt.setupMocks(mockLumera, mockCodec, mockP2P) - - config := &cascade.Config{Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService( - config, - nil, nil, nil, nil, - ) - - service.LumeraClient = mockLumera - service.P2P = mockP2P - service.RQ = mockCodec - // Inject mocks for adaptors - task := cascade.NewCascadeRegistrationTask(service) - - req := &cascade.RegisterRequest{ - TaskID: "task1", - ActionID: "action123", - DataHash: rawHash, - DataSize: 10240, - FilePath: tmpFile.Name(), - } - - var events []cascade.RegisterResponse - err := task.Register(context.Background(), req, func(resp *cascade.RegisterResponse) error { - events = append(events, *resp) - return nil - }) - - if tt.expectedError != "" { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Len(t, events, tt.expectedEvents) - } - }) - } -} - -func encodedCascadeMetadata(hash string, t *testing.T) []byte { - t.Helper() - - // Fake layout signature for new index file format - fakeLayoutSig := base64.StdEncoding.EncodeToString([]byte("fakelayoutsignature")) - - // Create index file structure - indexFile := map[string]any{ - "layout_ids": []string{"layout_id_1", "layout_id_2"}, - "layout_signature": fakeLayoutSig, - } - indexFileJSON, _ := json.Marshal(indexFile) - fakeIndexFile := base64.StdEncoding.EncodeToString(indexFileJSON) - - // Fake creators signature - this is what the chain uses for index ID generation - fakeCreatorsSig 
:= base64.StdEncoding.EncodeToString([]byte("fakecreatorssignature")) - - metadata := &actiontypes.CascadeMetadata{ - DataHash: hash, - FileName: "file.txt", - RqIdsIc: 2, - RqIdsMax: 4, - RqIdsIds: []string{"id1", "id2"}, - Signatures: fakeIndexFile + "." + fakeCreatorsSig, - } - - bytes, err := proto.Marshal(metadata) - if err != nil { - t.Fatalf("failed to marshal CascadeMetadata: %v", err) - } - - return bytes -} - -func blake3HashRawAndBase64(t *testing.T, path string) ([]byte, string) { - t.Helper() - - data, err := os.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - hash := blake3.Sum256(data) - raw := hash[:] - b64 := base64.StdEncoding.EncodeToString(raw) - return raw, b64 -} - -func decodeHexOrDie(hexStr string) []byte { - bz, err := hex.DecodeString(hexStr) - if err != nil { - panic(err) - } - return bz -} diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go deleted file mode 100644 index a1d9898b..00000000 --- a/supernode/services/cascade/service.go +++ /dev/null @@ -1,66 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -type CascadeService struct { - *base.SuperNodeService - config *Config - - LumeraClient adaptors.LumeraClient - P2P adaptors.P2PService - RQ adaptors.CodecService -} - -// Compile-time checks to ensure CascadeService implements required interfaces -var _ supernode.TaskProvider = (*CascadeService)(nil) -var _ CascadeServiceFactory = (*CascadeService)(nil) - -// NewCascadeRegistrationTask creates a new task for cascade registration -func (service 
*CascadeService) NewCascadeRegistrationTask() CascadeTask { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - return task -} - -// Run starts the service -func (service *CascadeService) Run(ctx context.Context) error { - return service.RunHelper(ctx, service.config.SupernodeAccountAddress, logPrefix) -} - -// GetServiceName returns the name of the cascade service -func (service *CascadeService) GetServiceName() string { - return "cascade" -} - -// GetRunningTasks returns a list of currently running task IDs -func (service *CascadeService) GetRunningTasks() []string { - var taskIDs []string - for _, t := range service.Worker.Tasks() { - // Include only tasks that are not in a final state - if st := t.Status(); st != nil && st.SubStatus != nil && !st.SubStatus.IsFinal() { - taskIDs = append(taskIDs, t.ID()) - } - } - return taskIDs -} - -// NewCascadeService returns a new CascadeService instance -func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } -} diff --git a/supernode/services/cascade/service_test.go b/supernode/services/cascade/service_test.go deleted file mode 100644 index bc2998ad..00000000 --- a/supernode/services/cascade/service_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package cascade_test - -import ( - "context" - "testing" - "time" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestNewCascadeService(t *testing.T) { - 
ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = mockP2P - - assert.NotNil(t, service) - assert.NotNil(t, service.LumeraClient) - assert.NotNil(t, service.P2P) - assert.NotNil(t, service.RQ) -} - -func TestNewCascadeRegistrationTask(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = mockP2P - - task := cascade.NewCascadeRegistrationTask(service) - assert.NotNil(t, task) - - go func() { - service.Worker.AddTask(task) - }() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - - err := service.RunHelper(ctx, "node-id", "prefix") - assert.NoError(t, err) -} diff --git a/supernode/services/cascade/status.go b/supernode/services/cascade/status.go deleted file mode 100644 index b5633a45..00000000 --- a/supernode/services/cascade/status.go +++ /dev/null @@ -1,22 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// StatusResponse represents the status response for cascade service -type StatusResponse = supernode.StatusResponse - -// GetStatus delegates to the 
common supernode status service -func (service *CascadeService) GetStatus(ctx context.Context) (StatusResponse, error) { - // Create a status service and register the cascade service as a task provider - // Pass nil for optional dependencies (P2P, lumera client, and config) - // as cascade service doesn't have access to them in this context - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - statusService.RegisterTaskProvider(service) - - // Get the status from the common service - return statusService.GetStatus(ctx, false) -} diff --git a/supernode/services/cascade/status_test.go b/supernode/services/cascade/status_test.go deleted file mode 100644 index 0c1b04fd..00000000 --- a/supernode/services/cascade/status_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package cascade - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" - "github.com/stretchr/testify/assert" -) - -func TestGetStatus(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - taskCount int - expectErr bool - expectTasks int - }{ - { - name: "no tasks", - taskCount: 0, - expectErr: false, - expectTasks: 0, - }, - { - name: "one task", - taskCount: 1, - expectErr: false, - expectTasks: 1, - }, - { - name: "multiple tasks", - taskCount: 3, - expectErr: false, - expectTasks: 3, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup service and worker - service := &CascadeService{ - SuperNodeService: base.NewSuperNodeService(nil), - } - - go func() { - service.RunHelper(ctx, "node-id", "prefix") - }() - - // Register tasks - for i := 0; i < tt.taskCount; i++ { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - } - - // Call GetStatus from service - resp, err := service.GetStatus(ctx) - if tt.expectErr { - assert.Error(t, err) - return - } - - assert.NoError(t, 
err) - - // Version check - assert.NotEmpty(t, resp.Version) - - // Uptime check - assert.True(t, resp.UptimeSeconds >= 0) - - // CPU checks - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, resp.Resources.CPU.Cores >= 0) - - // Memory checks (now in GB) - assert.True(t, resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsedGB <= resp.Resources.Memory.TotalGB) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0 && resp.Resources.Memory.UsagePercent <= 100) - - // Hardware summary check - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Storage checks - should have default root filesystem - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Registered services check - assert.Contains(t, resp.RegisteredServices, "cascade") - - // Check new fields have default values (since service doesn't have access to P2P/lumera/config) - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - - // Task count check - look for cascade service in the running tasks list - var cascadeService *supernode.ServiceTasks - for _, service := range resp.RunningTasks { - if service.ServiceName == "cascade" { - cascadeService = &service - break - } - } - - if tt.expectTasks > 0 { - assert.NotNil(t, cascadeService, "cascade service should be present") - assert.Equal(t, tt.expectTasks, int(cascadeService.TaskCount)) - assert.Equal(t, tt.expectTasks, len(cascadeService.TaskIDs)) - } else { - // If no tasks expected, either no cascade service or empty task count - if cascadeService != nil { - assert.Equal(t, 0, int(cascadeService.TaskCount)) - } - } - }) - } -} diff --git a/supernode/services/cascade/task.go 
b/supernode/services/cascade/task.go deleted file mode 100644 index 5dcffa34..00000000 --- a/supernode/services/cascade/task.go +++ /dev/null @@ -1,58 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" -) - -// CascadeRegistrationTask is the task for cascade registration -type CascadeRegistrationTask struct { - *CascadeService - - *base.SuperNodeTask - storage *storage.StorageHandler - - Asset *files.File - dataHash string - creatorSignature []byte -} - -const ( - logPrefix = "cascade" -) - -// Compile-time check to ensure CascadeRegistrationTask implements CascadeTask interface -var _ CascadeTask = (*CascadeRegistrationTask)(nil) - -// Run starts the task -func (task *CascadeRegistrationTask) Run(ctx context.Context) error { - return task.RunHelper(ctx, task.removeArtifacts) -} - -// removeArtifacts cleans up any files created during processing -func (task *CascadeRegistrationTask) removeArtifacts() { - task.RemoveFile(task.Asset) -} - -// NewCascadeRegistrationTask returns a new Task instance -func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { - task := &CascadeRegistrationTask{ - SuperNodeTask: base.NewSuperNodeTask(logPrefix), - CascadeService: service, - } - - return task -} - -func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { - _ = send(&RegisterResponse{ - EventType: eventType, - Message: msg, - TxHash: txHash, - }) - - return -} diff --git a/supernode/services/common/base/supernode_service.go b/supernode/services/common/base/supernode_service.go deleted file mode 100644 index 424556b0..00000000 --- a/supernode/services/common/base/supernode_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package base - -import ( - "context" - 
"time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// SuperNodeServiceInterface common interface for Services -type SuperNodeServiceInterface interface { - RunHelper(ctx context.Context) error - NewTask() task.Task - Task(id string) task.Task -} - -// SuperNodeService common "class" for Services -type SuperNodeService struct { - *task.Worker - P2PClient p2p.Client -} - -// run starts task -func (service *SuperNodeService) run(ctx context.Context, nodeID string, prefix string) error { - ctx = logtrace.CtxWithCorrelationID(ctx, prefix) - - if nodeID == "" { - return errors.New("PastelID is not specified in the config file") - } - - group, ctx := errgroup.WithContext(ctx) - group.Go(func() error { - return service.Worker.Run(ctx) - }) - - return group.Wait() -} - -// RunHelper common code for Service runner -func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, prefix string) error { - for { - select { - case <-ctx.Done(): - logtrace.Error(ctx, "context done - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - case <-time.After(5 * time.Second): - if err := service.run(ctx, nodeID, prefix); err != nil { - service.Worker = task.NewWorker() - logtrace.Error(ctx, "Service run failed, retrying", logtrace.Fields{logtrace.FieldModule: "supernode", logtrace.FieldError: err.Error()}) - } else { - logtrace.Debug(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - } - } - } -} - -// NewSuperNodeService creates SuperNodeService -func NewSuperNodeService( - p2pClient p2p.Client, -) *SuperNodeService { - return &SuperNodeService{ - Worker: task.NewWorker(), - P2PClient: p2pClient, - } -} diff --git 
a/supernode/services/common/base/supernode_task.go b/supernode/services/common/base/supernode_task.go deleted file mode 100644 index 2908558d..00000000 --- a/supernode/services/common/base/supernode_task.go +++ /dev/null @@ -1,71 +0,0 @@ -package base - -import ( - "context" - "fmt" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// TaskCleanerFunc pointer to func that removes artefacts -type TaskCleanerFunc func() - -// SuperNodeTask base "class" for Task -type SuperNodeTask struct { - task.Task - - LogPrefix string -} - -// RunHelper common code for Task runner -func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { - ctx = task.context(ctx) - logtrace.Debug(ctx, "Start task", logtrace.Fields{}) - defer logtrace.Debug(ctx, "Task canceled", logtrace.Fields{}) - defer task.Cancel() - - task.SetStatusNotifyFunc(func(status *state.Status) { - logtrace.Debug(ctx, "States updated", logtrace.Fields{"status": status.String()}) - }) - - defer clean() - - err := task.RunAction(ctx) - - // Update task status based on completion result - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - - return err -} - -func (task *SuperNodeTask) context(ctx context.Context) context.Context { - return logtrace.CtxWithCorrelationID(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID())) -} - -// RemoveFile removes file from FS (TODO: move to gonode.common) -func (task *SuperNodeTask) RemoveFile(file *files.File) { - if file != nil { - logtrace.Debug(context.Background(), "remove file", logtrace.Fields{"filename": file.Name()}) - if err := file.Remove(); err != nil { - logtrace.Debug(context.Background(), "remove file 
failed", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } -} - -// NewSuperNodeTask returns a new Task instance. -func NewSuperNodeTask(logPrefix string) *SuperNodeTask { - snt := &SuperNodeTask{ - Task: task.New(common.StatusTaskStarted), - LogPrefix: logPrefix, - } - - return snt -} diff --git a/supernode/services/common/base/supernode_task_test.go b/supernode/services/common/base/supernode_task_test.go deleted file mode 100644 index 9e108f59..00000000 --- a/supernode/services/common/base/supernode_task_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package base - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewSuperNodeTask(t *testing.T) { - task := NewSuperNodeTask("testprefix") - assert.NotNil(t, task) - assert.Equal(t, "testprefix", task.LogPrefix) -} - -func TestSuperNodeTask_RunHelper(t *testing.T) { - called := false - cleaner := func() { - called = true - } - - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Run the helper in a goroutine - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - err := snt.RunHelper(ctx, cleaner) - assert.NoError(t, err) - }() - - // Give the RunHelper some time to start and block on actionCh - time.Sleep(10 * time.Millisecond) - - // Submit dummy action to allow RunAction to proceed - done := snt.NewAction(func(ctx context.Context) error { - return nil - }) - - <-done // wait for action to complete - - snt.CloseActionCh() // close to allow RunAction to return - wg.Wait() // wait for RunHelper to exit - - assert.True(t, called) -} - -func TestSuperNodeTask_RunHelper_WithError(t *testing.T) { - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var wg sync.WaitGroup - wg.Add(1) - - var runErr error - go func() { - defer wg.Done() - runErr = snt.RunHelper(ctx, func() {}) - }() - - // Give RunHelper time to start - 
time.Sleep(10 * time.Millisecond) - - done := snt.NewAction(func(ctx context.Context) error { - return fmt.Errorf("fail") - }) - - <-done // wait for the action to complete - snt.CloseActionCh() // allow RunAction to exit - wg.Wait() // wait for RunHelper to return - - assert.EqualError(t, runErr, "fail") -} diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go deleted file mode 100644 index 684d1fd1..00000000 --- a/supernode/services/common/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package common - -const ( - defaultNumberSuperNodes = 10 -) - -// Config contains common configuration of the services. -type Config struct { - SupernodeAccountAddress string - SupernodeIPAddress string - NumberSuperNodes int -} - -// NewConfig returns a new Config instance -func NewConfig() *Config { - return &Config{ - NumberSuperNodes: defaultNumberSuperNodes, - } -} diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go deleted file mode 100644 index 9e570d03..00000000 --- a/supernode/services/common/storage/handler.go +++ /dev/null @@ -1,180 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "io/fs" - "math" - "math/rand/v2" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" -) - -const ( - loadSymbolsBatchSize = 2500 - storeSymbolsPercent = 10 - concurrency = 1 - - UnknownDataType = iota // 1 - P2PDataRaptorQSymbol // 1 - P2PDataCascadeMetadata // 2 -) - -// StorageHandler provides common logic for RQ and P2P operations -type StorageHandler struct { - P2PClient p2p.Client - rqDir string - - TaskID string - TxID string - - store rqstore.Store - semaphore chan 
struct{} -} - -// NewStorageHandler creates instance of StorageHandler -func NewStorageHandler(p2p p2p.Client, rqDir string, store rqstore.Store) *StorageHandler { - return &StorageHandler{ - P2PClient: p2p, - rqDir: rqDir, - store: store, - semaphore: make(chan struct{}, concurrency), - } -} - -// StoreFileIntoP2P stores file into P2P -func (h *StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) { - data, err := file.Bytes() - if err != nil { - return "", errors.Errorf("store file %s into p2p", file.Name()) - } - return h.StoreBytesIntoP2P(ctx, data, typ) -} - -// StoreBytesIntoP2P into P2P actual data -func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) { - return h.P2PClient.Store(ctx, data, typ) -} - -// StoreBatch stores into P2P an array of byte slices. -func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { - val := ctx.Value(logtrace.CorrelationIDKey) - taskID := "" - if val != nil { - taskID = fmt.Sprintf("%v", val) - } - - logtrace.Debug(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) - return h.P2PClient.StoreBatch(ctx, list, typ, taskID) -} - -// StoreRaptorQSymbolsIntoP2P stores RaptorQ symbols into P2P -// It first records the directory in the database, then gathers all symbol paths -// under the specified directory. If the number of keys exceeds a certain threshold, -// it randomly samples a percentage of them. Finally, it streams the symbols in -// fixed-size batches to the P2P network. -// -// Note: P2P client returns (ratePct, requests, err) for each batch; we ignore -// the metrics here and only validate error semantics. 
-func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID, symbolsDir string) error { - /* record directory in DB */ - if err := h.store.StoreSymbolDirectory(taskID, symbolsDir); err != nil { - return fmt.Errorf("store symbol dir: %w", err) - } - - /* gather every symbol path under symbolsDir ------------------------- */ - keys, err := walkSymbolTree(symbolsDir) - if err != nil { - return err - } - - /* down-sample if we exceed the "big directory" threshold ------------- */ - if len(keys) > loadSymbolsBatchSize { - want := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) - if want < len(keys) { - rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) - keys = keys[:want] - } - sort.Strings(keys) // deterministic order inside the sample - } - - logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := h.storeSymbolsInP2P(ctx, taskID, symbolsDir, keys[start:end]); err != nil { - return err - } - start = end - } - - if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil { - return fmt.Errorf("update first-batch flag: %w", err) - } - - logtrace.Debug(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)}) - - return nil -} - -func walkSymbolTree(root string) ([]string, error) { - var keys []string - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err // propagate I/O errors - } - if d.IsDir() { - return nil // skip directory nodes - } - // ignore layout json if present - if strings.EqualFold(filepath.Ext(d.Name()), ".json") { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - keys = append(keys, rel) // store as 
"block_0/filename" - return nil - }) - if err != nil { - return nil, fmt.Errorf("walk symbol tree: %w", err) - } - return keys, nil -} - -func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) error { - logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) - - symbols, err := utils.LoadSymbols(root, fileKeys) - if err != nil { - return fmt.Errorf("load symbols: %w", err) - } - - if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } - - logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) - - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return fmt.Errorf("delete symbols: %w", err) - } - - logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - - return nil -} diff --git a/supernode/services/common/storage/handler_test.go b/supernode/services/common/storage/handler_test.go deleted file mode 100644 index fd4e0d8e..00000000 --- a/supernode/services/common/storage/handler_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package storage - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/p2p/mocks" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// --- Mocks --- - -type mockP2PClient struct { - mocks.Client -} - -type mockStore struct { - mock.Mock -} - -func (m *mockStore) StoreSymbolDirectory(taskID, dir string) error { - args := m.Called(taskID, dir) - return args.Error(0) -} - -func (m *mockStore) UpdateIsFirstBatchStored(txID string) error { - args := m.Called(txID) - return args.Error(0) -} - -func TestStoreBytesIntoP2P(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - data := []byte("hello") - p2pClient.On("Store", mock.Anything, data, 1).Return("some-id", nil) - - id, err := 
handler.StoreBytesIntoP2P(context.Background(), data, 1) - assert.NoError(t, err) - assert.Equal(t, "some-id", id) - p2pClient.AssertExpectations(t) -} - -func TestStoreBatch(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - ctx := context.WithValue(context.Background(), "task_id", "123") - list := [][]byte{[]byte("a"), []byte("b")} - // StoreBatch now returns error only - p2pClient.On("StoreBatch", mock.Anything, list, 3, "").Return(nil) - - err := handler.StoreBatch(ctx, list, 3) - assert.NoError(t, err) -} diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go deleted file mode 100644 index 8569470b..00000000 --- a/supernode/services/common/supernode/service.go +++ /dev/null @@ -1,294 +0,0 @@ -package supernode - -import ( - "context" - "fmt" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/config" -) - -// Version is the supernode version, set by the main application -var Version = "dev" - -// SupernodeStatusService provides centralized status information -// by collecting system metrics and aggregating task information from registered services -type SupernodeStatusService struct { - taskProviders []TaskProvider // List of registered services that provide task information - metrics *MetricsCollector // System metrics collector for CPU and memory stats - storagePaths []string // Paths to monitor for storage metrics - startTime time.Time // Service start time for uptime calculation - p2pService p2p.Client // P2P service for network information - lumeraClient lumera.Client // Lumera client for blockchain queries - config *config.Config // Supernode configuration -} - -// 
NewSupernodeStatusService creates a new supernode status service instance -func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { - return &SupernodeStatusService{ - taskProviders: make([]TaskProvider, 0), - metrics: NewMetricsCollector(), - storagePaths: []string{"/"}, // Default to monitoring root filesystem - startTime: time.Now(), - p2pService: p2pService, - lumeraClient: lumeraClient, - config: cfg, - } -} - -// RegisterTaskProvider registers a service as a task provider -// This allows the service to report its running tasks in status responses -func (s *SupernodeStatusService) RegisterTaskProvider(provider TaskProvider) { - s.taskProviders = append(s.taskProviders, provider) -} - -// GetStatus returns the current system status including all registered services -// This method collects CPU metrics, memory usage, and task information from all providers -func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (StatusResponse, error) { - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldModule: "SupernodeStatusService", - } - logtrace.Debug(ctx, "status request received", fields) - - var resp StatusResponse - resp.Version = Version - - // Calculate uptime - resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) - - // Collect CPU metrics - cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) - if err != nil { - return resp, err - } - resp.Resources.CPU.UsagePercent = cpuUsage - - // Get CPU cores - cpuCores, err := s.metrics.GetCPUCores(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) - cpuCores = 0 - } - resp.Resources.CPU.Cores = cpuCores - - // Collect memory metrics - memTotal, memUsed, memAvailable, memUsedPerc, err := s.metrics.CollectMemoryMetrics(ctx) - if err != nil { - return resp, err - } - - // 
Convert to GB - const bytesToGB = 1024 * 1024 * 1024 - resp.Resources.Memory.TotalGB = float64(memTotal) / bytesToGB - resp.Resources.Memory.UsedGB = float64(memUsed) / bytesToGB - resp.Resources.Memory.AvailableGB = float64(memAvailable) / bytesToGB - resp.Resources.Memory.UsagePercent = memUsedPerc - - // Generate hardware summary - if cpuCores > 0 && resp.Resources.Memory.TotalGB > 0 { - resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cpuCores, resp.Resources.Memory.TotalGB) - } - - // Collect storage metrics - resp.Resources.Storage = s.metrics.CollectStorageMetrics(ctx, s.storagePaths) - - // Collect service information from all registered providers - resp.RunningTasks = make([]ServiceTasks, 0, len(s.taskProviders)) - resp.RegisteredServices = make([]string, 0, len(s.taskProviders)) - - for _, provider := range s.taskProviders { - serviceName := provider.GetServiceName() - tasks := provider.GetRunningTasks() - - // Add to registered services list - resp.RegisteredServices = append(resp.RegisteredServices, serviceName) - - // Add all services to running tasks (even with 0 tasks) - serviceTask := ServiceTasks{ - ServiceName: serviceName, - TaskIDs: tasks, - TaskCount: int32(len(tasks)), - } - resp.RunningTasks = append(resp.RunningTasks, serviceTask) - } - - // Initialize network info - resp.Network = NetworkInfo{ - PeersCount: 0, - PeerAddresses: []string{}, - } - - // Prepare P2P metrics container (always present in response) - metrics := P2PMetrics{ - NetworkHandleMetrics: map[string]HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []BanEntry{}, - } - - // Collect P2P network information and metrics (fill when available and requested) - if includeP2PMetrics && s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - if dhtStats, ok := 
p2pStats["dht"].(map[string]interface{}); ok { - if peersCount, ok := dhtStats["peers_count"].(int); ok { - resp.Network.PeersCount = int32(peersCount) - } - - // Extract peer addresses - if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { - resp.Network.PeerAddresses = make([]string, 0, len(peers)) - for _, peer := range peers { - // Format peer address as "ID@IP:Port" - peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) - resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) - } - } else { - resp.Network.PeerAddresses = []string{} - } - } - - // Disk info - if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { - metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} - } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { - metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} - } - - // Ban list - if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { - for _, b := range bans { - metrics.BanList = append(metrics.BanList, BanEntry{ - ID: b.ID, - IP: b.IP, - Port: uint32(b.Port), - Count: int32(b.Count), - CreatedAtUnix: b.CreatedAt.Unix(), - AgeSeconds: int64(b.Age.Seconds()), - }) - } - } - - // Conn pool metrics - if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { - for k, v := range pool { - metrics.ConnPoolMetrics[k] = v - } - } - - // DHT metrics and database/network counters live inside dht map - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - // Database - if db, ok := dhtStats["database"].(map[string]interface{}); ok { - var sizeMB float64 - if v, ok := db["p2p_db_size"].(float64); ok { - sizeMB = v - } - var recs int64 - switch v := db["p2p_db_records_count"].(type) { - case int: - recs = int64(v) - case int64: - recs = v - case float64: - recs = int64(v) - } - metrics.Database = DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} - } - - // Network handle metrics - if 
nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { - for k, c := range nhm { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { - for k, vi := range nhmI { - if c, ok := vi.(kademlia.HandleCounters); ok { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } - } - - // Detailed recent per-request lists removed from API mapping - } - - // DHT rolling metrics snapshot is attached at top-level under dht_metrics - if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { - // Store success - for _, p := range snap.StoreSuccessRecent { - metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ - TimeUnix: p.Time.Unix(), - Requests: int32(p.Requests), - Successful: int32(p.Successful), - SuccessRate: p.SuccessRate, - }) - } - // Batch retrieve - for _, p := range snap.BatchRetrieveRecent { - metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ - TimeUnix: p.Time.Unix(), - Keys: int32(p.Keys), - Required: int32(p.Required), - FoundLocal: int32(p.FoundLocal), - FoundNetwork: int32(p.FoundNet), - DurationMS: p.Duration.Milliseconds(), - }) - } - metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips - metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements - } - } - } - - // Always include metrics (may be empty if not available) - resp.P2PMetrics = metrics - - // Calculate rank from top supernodes - if s.lumeraClient != nil && s.config != nil { - // Get current block height - blockInfo, err := s.lumeraClient.Node().GetLatestBlock(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get latest block", logtrace.Fields{logtrace.FieldError: 
err.Error()}) - } else { - // Get top supernodes for current block - topNodes, err := s.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, uint64(blockInfo.SdkBlock.Header.Height)) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get top supernodes", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - // Find our rank - for idx, node := range topNodes.Supernodes { - if node.SupernodeAccount == s.config.SupernodeConfig.Identity { - resp.Rank = int32(idx + 1) // Rank starts from 1 - break - } - } - } - } - } - - if s.config != nil && s.lumeraClient != nil { - if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { - resp.IPAddress = supernodeInfo.LatestAddress - } - - } - - // Log summary statistics - totalTasks := 0 - for _, service := range resp.RunningTasks { - totalTasks += int(service.TaskCount) - } - - return resp, nil -} diff --git a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go deleted file mode 100644 index f7a9b4c4..00000000 --- a/supernode/services/common/supernode/service_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package supernode - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/stretchr/testify/assert" -) - -func TestSupernodeStatusService(t *testing.T) { - ctx := context.Background() - - t.Run("empty service", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have version info - assert.NotEmpty(t, resp.Version) - - // Should have uptime - assert.True(t, resp.UptimeSeconds >= 0) - - // Should have CPU and Memory info - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, 
resp.Resources.CPU.Cores >= 0) - assert.True(t, resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // Should have hardware summary if cores and memory are available - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Should have storage info (default root filesystem) - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Should have empty services list - assert.Empty(t, resp.RunningTasks) - assert.Empty(t, resp.RegisteredServices) - - // Should have default values for new fields - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - }) - - t.Run("single service with tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2", "task3"}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(3), service.TaskCount) - assert.Equal(t, []string{"task1", "task2", "task3"}, service.TaskIDs) - }) - - t.Run("multiple services", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register multiple mock task providers - cascadeProvider := &common.MockTaskProvider{ - ServiceName: "cascade", - TaskIDs: []string{"cascade1", "cascade2"}, - } - 
senseProvider := &common.MockTaskProvider{ - ServiceName: "sense", - TaskIDs: []string{"sense1"}, - } - - statusService.RegisterTaskProvider(cascadeProvider) - statusService.RegisterTaskProvider(senseProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have two services - assert.Len(t, resp.RunningTasks, 2) - assert.Len(t, resp.RegisteredServices, 2) - assert.Contains(t, resp.RegisteredServices, "cascade") - assert.Contains(t, resp.RegisteredServices, "sense") - - // Check services are present - serviceMap := make(map[string]ServiceTasks) - for _, service := range resp.RunningTasks { - serviceMap[service.ServiceName] = service - } - - cascade, ok := serviceMap["cascade"] - assert.True(t, ok) - assert.Equal(t, int32(2), cascade.TaskCount) - assert.Equal(t, []string{"cascade1", "cascade2"}, cascade.TaskIDs) - - sense, ok := serviceMap["sense"] - assert.True(t, ok) - assert.Equal(t, int32(1), sense.TaskCount) - assert.Equal(t, []string{"sense1"}, sense.TaskIDs) - }) - - t.Run("service with no tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register a mock task provider with no tasks - mockProvider := &common.MockTaskProvider{ - ServiceName: "empty-service", - TaskIDs: []string{}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"empty-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "empty-service", service.ServiceName) - assert.Equal(t, int32(0), service.TaskCount) - assert.Empty(t, service.TaskIDs) - }) -} diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go deleted file mode 100644 index 6224d36d..00000000 --- a/supernode/services/common/supernode/types.go 
+++ /dev/null @@ -1,131 +0,0 @@ -package supernode - -// StatusResponse represents the complete system status information -// with clear organization of resources and services -type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RunningTasks []ServiceTasks // Services with currently running tasks - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics P2PMetrics // Detailed P2P metrics snapshot -} - -// Resources contains system resource metrics -type Resources struct { - CPU CPUInfo // CPU usage information - Memory MemoryInfo // Memory usage information - Storage []StorageInfo // Storage volumes information - HardwareSummary string // Formatted hardware summary (e.g., "8 cores / 32GB RAM") -} - -// CPUInfo contains CPU usage metrics -type CPUInfo struct { - UsagePercent float64 // CPU usage percentage (0-100) - Cores int32 // Number of CPU cores -} - -// MemoryInfo contains memory usage metrics -type MemoryInfo struct { - TotalGB float64 // Total memory in GB - UsedGB float64 // Used memory in GB - AvailableGB float64 // Available memory in GB - UsagePercent float64 // Memory usage percentage (0-100) -} - -// StorageInfo contains storage metrics for a specific path -type StorageInfo struct { - Path string // Storage path being monitored - TotalBytes uint64 // Total storage in bytes - UsedBytes uint64 // Used storage in bytes - AvailableBytes uint64 // Available storage in bytes - UsagePercent float64 // Storage usage percentage (0-100) -} - -// ServiceTasks contains task information for a specific service -type ServiceTasks struct { - ServiceName string // Name of the service (e.g., "cascade") - TaskIDs []string // List of 
currently running task IDs - TaskCount int32 // Total number of running tasks -} - -// NetworkInfo contains P2P network information -type NetworkInfo struct { - PeersCount int32 // Number of connected peers in P2P network - PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) -} - -// P2PMetrics mirrors the proto P2P metrics for status API -type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus -} - -type StoreSuccessPoint struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 -} - -type BatchRetrievePoint struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 -} - -type DhtMetrics struct { - StoreSuccessRecent []StoreSuccessPoint - BatchRetrieveRecent []BatchRetrievePoint - HotPathBannedSkips int64 - HotPathBanIncrements int64 -} - -type HandleCounters struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 -} - -type BanEntry struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 -} - -type DatabaseStats struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 -} - -type DiskStatus struct { - AllMB float64 - UsedMB float64 - FreeMB float64 -} - -// Removed: recent per-request lists from public API - -// TaskProvider interface defines the contract for services to provide -// their running task information to the status service -type TaskProvider interface { - // GetServiceName returns the unique name identifier for this service - GetServiceName() string - - // GetRunningTasks returns a list of currently active task IDs - GetRunningTasks() []string -} diff --git a/supernode/services/common/task_status.go b/supernode/services/common/task_status.go deleted file mode 100644 index 22b63b7a..00000000 --- 
a/supernode/services/common/task_status.go +++ /dev/null @@ -1,51 +0,0 @@ -package common - -// List of task statuses. -const ( - StatusTaskStarted Status = iota - // Mode - StatusPrimaryMode - StatusSecondaryMode - - // Process - StatusConnected - - // Final - StatusTaskCanceled - StatusTaskCompleted -) - -var statusNames = map[Status]string{ - StatusTaskStarted: "Task started", - StatusTaskCanceled: "Task Canceled", - StatusTaskCompleted: "Task Completed", -} - -// Status represents status of the task -type Status byte - -func (status Status) String() string { - if name, ok := statusNames[status]; ok { - return name - } - return "" -} - -// IsFinal returns true if the status is the final. -func (status Status) IsFinal() bool { - return status == StatusTaskCanceled || status == StatusTaskCompleted -} - -// IsFailure returns true if the task failed due to an error -func (status Status) IsFailure() bool { - return status == StatusTaskCanceled -} - -// StatusNames returns a sorted list of status names. 
-func StatusNames() []string { - list := make([]string, len(statusNames)) - for i, name := range statusNames { - list[i] = name - } - return list -} diff --git a/supernode/services/common/task_status_test.go b/supernode/services/common/task_status_test.go deleted file mode 100644 index b9853120..00000000 --- a/supernode/services/common/task_status_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStatus_String(t *testing.T) { - tests := []struct { - status Status - expected string - }{ - {StatusTaskStarted, "Task started"}, - {StatusTaskCanceled, "Task Canceled"}, - {StatusTaskCompleted, "Task Completed"}, - {StatusPrimaryMode, ""}, - {StatusSecondaryMode, ""}, - {StatusConnected, ""}, - {Status(255), ""}, // unknown status - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.String(), "Status.String() should match expected name") - } -} - -func TestStatus_IsFinal(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - {StatusPrimaryMode, false}, - {StatusSecondaryMode, false}, - {StatusConnected, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, true}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFinal(), "Status.IsFinal() mismatch") - } -} - -func TestStatus_IsFailure(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, false}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFailure(), "Status.IsFailure() mismatch") - } -} diff --git a/supernode/services/common/test_helpers.go b/supernode/services/common/test_helpers.go deleted file mode 100644 index c49b940a..00000000 --- a/supernode/services/common/test_helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -// MockTaskProvider for testing (exported for use in other 
packages) -type MockTaskProvider struct { - ServiceName string - TaskIDs []string -} - -func (m *MockTaskProvider) GetServiceName() string { - return m.ServiceName -} - -func (m *MockTaskProvider) GetRunningTasks() []string { - return m.TaskIDs -} diff --git a/supernode/services/verifier/interface.go b/supernode/services/verifier/interface.go deleted file mode 100644 index 7414201a..00000000 --- a/supernode/services/verifier/interface.go +++ /dev/null @@ -1,55 +0,0 @@ -package verifier - -import ( - "context" - "strings" -) - -// ConfigVerifierService defines the interface for config verification service -type ConfigVerifierService interface { - // VerifyConfig performs comprehensive config validation against chain - VerifyConfig(ctx context.Context) (*VerificationResult, error) -} - -// VerificationResult contains the results of config verification -type VerificationResult struct { - Valid bool `json:"valid"` - Errors []ConfigError `json:"errors,omitempty"` - Warnings []ConfigError `json:"warnings,omitempty"` -} - -// ConfigError represents a configuration validation error or warning -type ConfigError struct { - Field string `json:"field"` - Expected string `json:"expected,omitempty"` - Actual string `json:"actual,omitempty"` - Message string `json:"message"` -} - -// IsValid returns true if all verifications passed -func (vr *VerificationResult) IsValid() bool { - return vr.Valid && len(vr.Errors) == 0 -} - -// HasWarnings returns true if there are any warnings -func (vr *VerificationResult) HasWarnings() bool { - return len(vr.Warnings) > 0 -} - -// Summary returns a human-readable summary of verification results -func (vr *VerificationResult) Summary() string { - if vr.IsValid() && !vr.HasWarnings() { - return "✓ Config verification successful" - } - - var summary string - for _, err := range vr.Errors { - summary += "✗ " + err.Message + "\n" - } - - for _, warn := range vr.Warnings { - summary += "⚠ " + warn.Message + "\n" - } - - return 
strings.TrimSuffix(summary, "\n") -} diff --git a/supernode/services/verifier/verifier.go b/supernode/services/verifier/verifier.go deleted file mode 100644 index 68a2ae77..00000000 --- a/supernode/services/verifier/verifier.go +++ /dev/null @@ -1,222 +0,0 @@ -package verifier - -import ( - "context" - "fmt" - "net" - - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// ConfigVerifier implements ConfigVerifierService -type ConfigVerifier struct { - config *config.Config - lumeraClient lumera.Client - keyring keyring.Keyring -} - -// NewConfigVerifier creates a new config verifier service -func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { - return &ConfigVerifier{ - config: cfg, - lumeraClient: client, - keyring: kr, - } -} - -// VerifyConfig performs comprehensive config validation against chain -func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{ - "identity": cv.config.SupernodeConfig.Identity, - "key_name": cv.config.SupernodeConfig.KeyName, - "p2p_port": cv.config.P2PConfig.Port, - }) - - // Check 1: Verify keyring contains the key - if err := cv.checkKeyExists(result); err != nil { - return result, err - } - - // Check 2: Verify key resolves to correct identity - if err := cv.checkIdentityMatches(result); err != nil { - return result, err - } - - // If keyring checks failed, don't proceed with chain queries - if !result.IsValid() { - return result, nil - } - - // Check 3: Query chain for 
supernode registration - supernodeInfo, err := cv.checkSupernodeExists(ctx, result) - if err != nil { - return result, err - } - - // If supernode doesn't exist, don't proceed with field comparisons - if supernodeInfo == nil { - return result, nil - } - - // Check 4: Verify supernode state is active - cv.checkSupernodeState(result, supernodeInfo) - - // Check 5: Verify all required ports are available - cv.checkPortsAvailable(result) - - logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{ - "valid": result.IsValid(), - "errors": len(result.Errors), - "warnings": len(result.Warnings), - }) - - return result, nil -} - -// checkKeyExists verifies the configured key exists in keyring -func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { - _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "key_name", - Actual: cv.config.SupernodeConfig.KeyName, - Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName), - }) - } - return nil -} - -// checkIdentityMatches verifies key resolves to configured identity -func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { - keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - // Already handled in checkKeyExists - return nil - } - - pubKey, err := keyInfo.GetPubKey() - if err != nil { - return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) - } - - addr := sdk.AccAddress(pubKey.Address()) - if addr.String() != cv.config.SupernodeConfig.Identity { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "identity", - Expected: addr.String(), - Actual: cv.config.SupernodeConfig.Identity, - Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), 
cv.config.SupernodeConfig.Identity), - }) - } - return nil -} - -// checkSupernodeExists queries chain for supernode registration -func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { - sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "registration", - Actual: "not_registered", - Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity), - }) - return nil, nil - } - return sn, nil -} - -// checkP2PPortMatches compares config P2P port with chain -func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) - chainPort := supernodeInfo.P2PPort - - if chainPort != "" && chainPort != configPort { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Expected: chainPort, - Actual: configPort, - Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort), - }) - } -} - -// checkSupernodeState verifies supernode is in active state -func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "state", - Expected: "SUPERNODE_STATE_ACTIVE", - Actual: supernodeInfo.CurrentState, - Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState), - }) - } -} - -// checkPortsAvailable verifies that all required ports are available for binding -func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { - // Check supernode port - if 
!cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "supernode_port", - Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port), - }) - } - - // Check P2P port - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port), - }) - } - - // Check gateway port (use configured port or default port 8002) - gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) - if gatewayPort == 0 { - gatewayPort = 8002 // Default gateway port (same as gateway.DefaultGatewayPort) - } - - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "gateway_port", - Actual: fmt.Sprintf("%d", gatewayPort), - Message: fmt.Sprintf("Port %d is already in use. 
Please stop the conflicting service or choose a different port", gatewayPort), - }) - } -} - -// isPortAvailable checks if a port is available for binding -func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { - address := fmt.Sprintf("%s:%d", host, port) - - // Try to listen on the port - listener, err := net.Listen("tcp", address) - if err != nil { - return false // Port is not available - } - - // Close the listener immediately since we're just checking availability - listener.Close() - return true // Port is available -} diff --git a/supernode/services/verifier/verifier_test.go b/supernode/services/verifier/verifier_test.go deleted file mode 100644 index 56fd3fb7..00000000 --- a/supernode/services/verifier/verifier_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package verifier - -import ( - "net" - "strconv" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/stretchr/testify/assert" -) - -func TestNewConfigVerifier(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "192.168.1.100", - }, - P2PConfig: config.P2PConfig{ - Port: 4445, - }, - } - - // Test that NewConfigVerifier returns a non-nil service - verifier := NewConfigVerifier(cfg, nil, nil) - assert.NotNil(t, verifier) - assert.Implements(t, (*ConfigVerifierService)(nil), verifier) -} - -func TestVerificationResult_IsValid(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "valid with no errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - }, - expected: true, - }, - { - name: "invalid with errors", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - {Message: "test error"}, - }, - }, - expected: false, - }, - { - name: "valid flag true but has errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{ - {Message: "test 
error"}, - }, - }, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.IsValid()) - }) - } -} - -func TestVerificationResult_HasWarnings(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "no warnings", - result: &VerificationResult{ - Warnings: []ConfigError{}, - }, - expected: false, - }, - { - name: "has warnings", - result: &VerificationResult{ - Warnings: []ConfigError{ - {Message: "test warning"}, - }, - }, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.HasWarnings()) - }) - } -} - -func TestVerificationResult_Summary(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - contains []string - }{ - { - name: "success with no warnings", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - }, - contains: []string{"✓ Config verification successful"}, - }, - { - name: "error message", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - { - Message: "Key not found", - }, - }, - }, - contains: []string{"✗ Key not found"}, - }, - { - name: "warning message", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{ - { - Message: "Host mismatch: config=localhost, chain=192.168.1.1", - }, - }, - }, - contains: []string{"⚠ Host mismatch: config=localhost, chain=192.168.1.1"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - summary := tt.result.Summary() - for _, expected := range tt.contains { - assert.Contains(t, summary, expected) - } - }) - } -} - -func TestConfigVerifier_isPortAvailable(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - }, - } - - 
verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - - // Test available port - available := verifier.isPortAvailable("127.0.0.1", 0) // Port 0 lets OS choose available port - assert.True(t, available) - - // Test unavailable port by creating a listener - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - // Now test that this port is not available - available = verifier.isPortAvailable("127.0.0.1", port) - assert.False(t, available) -} - -func TestConfigVerifier_checkPortsAvailable(t *testing.T) { - // Create a listener to occupy a port - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: uint16(port), // Use the occupied port - }, - P2PConfig: config.P2PConfig{ - Port: 0, // Available port - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for supernode port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "supernode_port", result.Errors[0].Field) - assert.Contains(t, result.Errors[0].Message, "already in use") -} - -func TestConfigVerifier_checkPortsAvailable_DefaultGatewayPort(t *testing.T) { - // Create a listener to occupy the default gateway port 8002 - 
listener, err := net.Listen("tcp", "127.0.0.1:8002") - assert.NoError(t, err) - defer listener.Close() - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: 4444, // Available port - GatewayPort: 0, // Not configured, should use default 8002 - }, - P2PConfig: config.P2PConfig{ - Port: 4445, // Available port - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for default gateway port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "gateway_port", result.Errors[0].Field) - assert.Equal(t, "8002", result.Errors[0].Actual) - assert.Contains(t, result.Errors[0].Message, "Port 8002 is already in use") -} diff --git a/supernode/services/common/supernode/metrics.go b/supernode/status/metrics.go similarity index 73% rename from supernode/services/common/supernode/metrics.go rename to supernode/status/metrics.go index 718c2a8f..ff29d100 100644 --- a/supernode/services/common/supernode/metrics.go +++ b/supernode/status/metrics.go @@ -1,4 +1,4 @@ -package supernode +package status import ( "context" @@ -14,19 +14,15 @@ import ( type MetricsCollector struct{} // NewMetricsCollector creates a new metrics collector instance -func NewMetricsCollector() *MetricsCollector { - return &MetricsCollector{} -} +func NewMetricsCollector() *MetricsCollector { return &MetricsCollector{} } // CollectCPUMetrics gathers CPU usage information -// Returns usage percentage as a float64 func (m *MetricsCollector) CollectCPUMetrics(ctx context.Context) (float64, error) { percentages, err := cpu.Percent(time.Second, false) if err != nil { logtrace.Error(ctx, "failed to get cpu info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err 
} - return percentages[0], nil } @@ -37,49 +33,41 @@ func (m *MetricsCollector) GetCPUCores(ctx context.Context) (int32, error) { logtrace.Error(ctx, "failed to get cpu core count", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - return int32(cores), nil } // CollectMemoryMetrics gathers memory usage information -// Returns memory statistics including total, used, available, and usage percentage func (m *MetricsCollector) CollectMemoryMetrics(ctx context.Context) (total, used, available uint64, usedPerc float64, err error) { vmem, err := mem.VirtualMemory() if err != nil { logtrace.Error(ctx, "failed to get memory info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, 0, 0, 0, err } - return vmem.Total, vmem.Used, vmem.Available, vmem.UsedPercent, nil } +// StorageInfo holds disk usage stats +type StorageInfo struct { + Path string + TotalBytes uint64 + UsedBytes uint64 + AvailableBytes uint64 + UsagePercent float64 +} + // CollectStorageMetrics gathers storage usage information for specified paths -// If paths is empty, it will collect metrics for the root filesystem func (m *MetricsCollector) CollectStorageMetrics(ctx context.Context, paths []string) []StorageInfo { if len(paths) == 0 { - // Default to root filesystem paths = []string{"/"} } - var storageInfos []StorageInfo for _, path := range paths { usage, err := disk.Usage(path) if err != nil { - logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{ - logtrace.FieldError: err.Error(), - "path": path, - }) - continue // Skip this path but continue with others + logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{logtrace.FieldError: err.Error(), "path": path}) + continue } - - storageInfos = append(storageInfos, StorageInfo{ - Path: path, - TotalBytes: usage.Total, - UsedBytes: usage.Used, - AvailableBytes: usage.Free, - UsagePercent: usage.UsedPercent, - }) + storageInfos = append(storageInfos, StorageInfo{Path: path, TotalBytes: usage.Total, 
UsedBytes: usage.Used, AvailableBytes: usage.Free, UsagePercent: usage.UsedPercent}) } - return storageInfos } diff --git a/supernode/status/service.go b/supernode/status/service.go new file mode 100644 index 00000000..0645385f --- /dev/null +++ b/supernode/status/service.go @@ -0,0 +1,226 @@ +package status + +import ( + "context" + "fmt" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/task" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/config" +) + +// Version is the supernode version, set by the main application +var Version = "dev" + +const statusSubsystemTimeout = 8 * time.Second + +// SupernodeStatusService provides centralized status information +type SupernodeStatusService struct { + metrics *MetricsCollector + storagePaths []string + startTime time.Time + p2pService p2p.Client + lumeraClient lumera.Client + config *config.Config + tracker task.Tracker +} + +// NewSupernodeStatusService creates a new supernode status service instance +func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config, tracker task.Tracker) *SupernodeStatusService { + return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg, tracker: tracker} +} + +// GetChainID returns the chain ID from the configuration +func (s *SupernodeStatusService) GetChainID() string { + if s.config != nil { + return s.config.LumeraClientConfig.ChainID + } + return "" +} + +// GetStatus returns the current system status including optional P2P info +func (s *SupernodeStatusService) GetStatus(ctx context.Context, 
includeP2PMetrics bool) (*pb.StatusResponse, error) { + fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} + logtrace.Debug(ctx, "status request received", fields) + + resp := &pb.StatusResponse{} + resp.Version = Version + resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) + + cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) + if err != nil { + return resp, err + } + if resp.Resources == nil { + resp.Resources = &pb.StatusResponse_Resources{} + } + if resp.Resources.Cpu == nil { + resp.Resources.Cpu = &pb.StatusResponse_Resources_CPU{} + } + resp.Resources.Cpu.UsagePercent = cpuUsage + cores, err := s.metrics.GetCPUCores(ctx) + if err != nil { + logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) + cores = 0 + } + resp.Resources.Cpu.Cores = cores + memTotal, memUsed, memAvail, memUsedPerc, err := s.metrics.CollectMemoryMetrics(ctx) + if err != nil { + return resp, err + } + const bytesToGB = 1024 * 1024 * 1024 + if resp.Resources.Memory == nil { + resp.Resources.Memory = &pb.StatusResponse_Resources_Memory{} + } + resp.Resources.Memory.TotalGb = float64(memTotal) / bytesToGB + resp.Resources.Memory.UsedGb = float64(memUsed) / bytesToGB + resp.Resources.Memory.AvailableGb = float64(memAvail) / bytesToGB + resp.Resources.Memory.UsagePercent = memUsedPerc + if cores > 0 && resp.Resources.Memory.TotalGb > 0 { + resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cores, resp.Resources.Memory.TotalGb) + } + // Storage metrics + for _, si := range s.metrics.CollectStorageMetrics(ctx, s.storagePaths) { + resp.Resources.StorageVolumes = append(resp.Resources.StorageVolumes, &pb.StatusResponse_Resources_Storage{ + Path: si.Path, + TotalBytes: si.TotalBytes, + UsedBytes: si.UsedBytes, + AvailableBytes: si.AvailableBytes, + UsagePercent: si.UsagePercent, + }) + } + + if resp.Network == nil { + resp.Network = &pb.StatusResponse_Network{} + } + 
resp.Network.PeersCount = 0 + resp.Network.PeerAddresses = []string{} + + // Populate running tasks from injected tracker + if s.tracker != nil { + snap := s.tracker.Snapshot() + if len(snap) > 0 { + for svc, ids := range snap { + resp.RunningTasks = append(resp.RunningTasks, &pb.StatusResponse_ServiceTasks{ + ServiceName: svc, + TaskIds: ids, + TaskCount: int32(len(ids)), + }) + } + } + } + + // Prepare optional P2P metrics container + pm := &pb.StatusResponse_P2PMetrics{ + DhtMetrics: &pb.StatusResponse_P2PMetrics_DhtMetrics{}, + NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, + Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, + Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, + } + + if includeP2PMetrics && s.p2pService != nil { + // Bound P2P metrics collection so status can't hang if P2P is slow + p2pCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + p2pStats, err := s.p2pService.Stats(p2pCtx) + if err != nil { + logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) + } else { + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if peersCount, ok := dhtStats["peers_count"].(int); ok { + resp.Network.PeersCount = int32(peersCount) + } + if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { + resp.Network.PeerAddresses = make([]string, 0, len(peers)) + for _, peer := range peers { + resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port)) + } + } else { + resp.Network.PeerAddresses = []string{} + } + } + if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { + pm.Disk.AllMb = du.All + pm.Disk.UsedMb = du.Used + pm.Disk.FreeMb = du.Free + } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { + pm.Disk.AllMb = duPtr.All + 
pm.Disk.UsedMb = duPtr.Used + pm.Disk.FreeMb = duPtr.Free + } + if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { + for _, b := range bans { + pm.BanList = append(pm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{Id: b.ID, Ip: b.IP, Port: uint32(b.Port), Count: int32(b.Count), CreatedAtUnix: b.CreatedAt.Unix(), AgeSeconds: int64(b.Age.Seconds())}) + } + } + if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { + for k, v := range pool { + pm.ConnPoolMetrics[k] = v + } + } + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if db, ok := dhtStats["database"].(map[string]interface{}); ok { + var sizeMB float64 + if v, ok := db["p2p_db_size"].(float64); ok { + sizeMB = v + } + var recs int64 + switch v := db["p2p_db_records_count"].(type) { + case int: + recs = int64(v) + case int64: + recs = v + case float64: + recs = int64(v) + } + pm.Database.P2PDbSizeMb = sizeMB + pm.Database.P2PDbRecordsCount = recs + } + if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { + for k, c := range nhm { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { + for k, vi := range nhmI { + if c, ok := vi.(kademlia.HandleCounters); ok { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } + } + } + if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { + for _, sp := range snap.StoreSuccessRecent { + pm.DhtMetrics.StoreSuccessRecent = append(pm.DhtMetrics.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{TimeUnix: sp.Time.Unix(), Requests: int32(sp.Requests), Successful: int32(sp.Successful), SuccessRate: sp.SuccessRate}) + } + for _, bp := range snap.BatchRetrieveRecent { + 
pm.DhtMetrics.BatchRetrieveRecent = append(pm.DhtMetrics.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{TimeUnix: bp.Time.Unix(), Keys: int32(bp.Keys), Required: int32(bp.Required), FoundLocal: int32(bp.FoundLocal), FoundNetwork: int32(bp.FoundNet), DurationMs: bp.Duration.Milliseconds()}) + } + pm.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips + pm.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements + } + } + } + if includeP2PMetrics { + resp.P2PMetrics = pm + } + + if s.config != nil && s.lumeraClient != nil { + // Bound chain query for latest address to avoid slow network hangs + chainCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(chainCtx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { + resp.IpAddress = supernodeInfo.LatestAddress + } else if err != nil { + logtrace.Error(ctx, "failed to resolve latest supernode address", logtrace.Fields{logtrace.FieldError: err.Error()}) + } + } + return resp, nil +} diff --git a/supernode/transport/gateway/server.go b/supernode/transport/gateway/server.go new file mode 100644 index 00000000..5c4df034 --- /dev/null +++ b/supernode/transport/gateway/server.go @@ -0,0 +1,236 @@ +package gateway + +import ( + "context" + "fmt" + "net" + "net/http" + _ "net/http/pprof" + "os" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "google.golang.org/protobuf/encoding/protojson" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// DefaultGatewayPort is an uncommon port for internal gateway use +const DefaultGatewayPort = 8002 + +// Server represents the HTTP gateway server +type Server struct { + ipAddress string + port int + server *http.Server + supernodeServer pb.SupernodeServiceServer + chainID string + pprofEnabled bool +} + 
+// NewServer creates a new HTTP gateway server that directly calls the service +// If port is 0, it will use the default port +func NewServer(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + }, nil +} + +// NewServerWithConfig creates a new HTTP gateway server with additional configuration +func NewServerWithConfig(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer, chainID string) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + // Determine if pprof should be enabled + pprofEnabled := strings.Contains(strings.ToLower(chainID), "testnet") || os.Getenv("ENABLE_PPROF") == "true" + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + chainID: chainID, + pprofEnabled: pprofEnabled, + }, nil +} + +// Run starts the HTTP gateway server (implements service interface) +func (s *Server) Run(ctx context.Context) error { + // Create gRPC-Gateway mux with custom JSON marshaler options + mux := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, // This ensures zero values are included + UseProtoNames: true, // Use original proto field names + }, + }), + ) + + // Register the service handler directly + err := pb.RegisterSupernodeServiceHandlerServer(ctx, mux, s.supernodeServer) + if err != nil { + return fmt.Errorf("failed to register gateway handler: %w", err) + } + + // Create HTTP mux for custom endpoints + httpMux := http.NewServeMux() + + // 
Register raw pprof endpoints BEFORE the gRPC gateway to intercept them + // These must be registered before the /api/ handler to take precedence + if s.pprofEnabled { + // Raw pprof endpoints that return actual pprof data (not JSON) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/heap", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/goroutine", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/allocs", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/block", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/mutex", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/threadcreate", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/profile", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/cmdline", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/symbol", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/trace", s.rawPprofHandler) + } + + // Register gRPC-Gateway endpoints + httpMux.Handle("/api/", mux) + + // Register Swagger endpoints + httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) + httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) + + // Register pprof endpoints (only on testnet) + if s.pprofEnabled { + httpMux.HandleFunc("/debug/pprof/", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/cmdline", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/profile", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/symbol", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/trace", s.pprofHandler) + // Register specific pprof profiles + httpMux.HandleFunc("/debug/pprof/allocs", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/block", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/goroutine", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/heap", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/mutex", s.pprofHandler) + 
httpMux.HandleFunc("/debug/pprof/threadcreate", s.pprofHandler) + + logtrace.Debug(ctx, "Pprof endpoints enabled on gateway", logtrace.Fields{ + "chain_id": s.chainID, + "port": s.port, + }) + } + + httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + http.Redirect(w, r, "/swagger-ui/", http.StatusFound) + } else { + http.NotFound(w, r) + } + }) + + // Create HTTP server + s.server = &http.Server{ + Addr: net.JoinHostPort(s.ipAddress, strconv.Itoa(s.port)), + Handler: s.corsMiddleware(httpMux), + ReadTimeout: 15 * time.Second, + WriteTimeout: 15 * time.Second, + IdleTimeout: 60 * time.Second, + } + + logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ + "address": s.ipAddress, + "port": s.port, + "pprof_enabled": s.pprofEnabled, + }) + + // Start server + if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + return fmt.Errorf("gateway server failed: %w", err) + } + + return nil +} + +// Stop gracefully stops the HTTP gateway server (implements service interface) +func (s *Server) Stop(ctx context.Context) error { + if s.server == nil { + return nil + } + + logtrace.Debug(ctx, "Shutting down HTTP gateway server", nil) + return s.server.Shutdown(ctx) +} + +// corsMiddleware adds CORS headers for web access +func (s *Server) corsMiddleware(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization") + + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + h.ServeHTTP(w, r) + }) +} + +// pprofHandler proxies requests to the pprof handlers +func (s *Server) pprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { 
+ http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } +} + +// rawPprofHandler handles the raw pprof endpoints that return actual pprof data +func (s *Server) rawPprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Map the /api/v1/debug/raw/pprof/* path to /debug/pprof/* + originalPath := r.URL.Path + r.URL.Path = strings.Replace(originalPath, "/api/v1/debug/raw/pprof", "/debug/pprof", 1) + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } + + // Restore the original path + r.URL.Path = originalPath +} diff --git a/supernode/node/supernode/gateway/swagger.go b/supernode/transport/gateway/swagger.go similarity index 59% rename from supernode/node/supernode/gateway/swagger.go rename to supernode/transport/gateway/swagger.go index ee815e52..4bcd3f3d 100644 --- a/supernode/node/supernode/gateway/swagger.go +++ b/supernode/transport/gateway/swagger.go @@ -22,30 +22,30 @@ const swaggerUIHTML = ` *, *:before, *:after { box-sizing: inherit; } body { margin:0; background: #fafafa; } - - -
- - - - - + + +
+ + + + + ` // serveSwaggerJSON serves the OpenAPI specification diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json new file mode 100644 index 00000000..c3944e9d --- /dev/null +++ b/supernode/transport/gateway/swagger.json @@ -0,0 +1,857 @@ +{ + "swagger": "2.0", + "info": { + "title": "supernode/service.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "SupernodeService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/api/v1/debug/raw/pprof": { + "get": { + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetRawPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/block": { + "get": { + "operationId": "SupernodeService_GetRawPprofBlock", + "responses": { + "200": { + "description": "A 
successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/cmdline": { + "get": { + "operationId": "SupernodeService_GetRawPprofCmdline", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetRawPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetRawPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An 
unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetRawPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "CPU profile duration in seconds (default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/symbol": { + "get": { + "operationId": "SupernodeService_GetRawPprofSymbol", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + 
"description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/services": { + "get": { + "operationId": "SupernodeService_ListServices", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeListServicesResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/status": { + "get": { + "operationId": "SupernodeService_GetStatus", + "responses": { + "200": { + "description": "A successful 
response.", + "schema": { + "$ref": "#/definitions/supernodeStatusResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "includeP2pMetrics", + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "SupernodeService" + ] + } + } + }, + "definitions": { + "DhtMetricsBatchRetrievePoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "keys": { + "type": "integer", + "format": "int32", + "title": "keys requested" + }, + "required": { + "type": "integer", + "format": "int32", + "title": "required count" + }, + "foundLocal": { + "type": "integer", + "format": "int32", + "title": "found locally" + }, + "foundNetwork": { + "type": "integer", + "format": "int32", + "title": "found on network" + }, + "durationMs": { + "type": "string", + "format": "int64", + "title": "duration in milliseconds" + } + } + }, + "DhtMetricsStoreSuccessPoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "requests": { + "type": "integer", + "format": "int32", + "title": "total node RPCs attempted" + }, + "successful": { + "type": "integer", + "format": "int32", + "title": "successful node RPCs" + }, + "successRate": { + "type": "number", + "format": "double", + "title": "percentage (0-100)" + } + } + }, + "P2PMetricsBanEntry": { + "type": "object", + "properties": { + "id": { + "type": "string", + "title": "printable ID" + }, + "ip": { + "type": "string", + "title": "last seen IP" + }, + "port": { + "type": "integer", + "format": "int64", + "title": "last seen port" + }, + "count": { + "type": "integer", + 
"format": "int32", + "title": "failure count" + }, + "createdAtUnix": { + "type": "string", + "format": "int64", + "title": "first ban time (unix seconds)" + }, + "ageSeconds": { + "type": "string", + "format": "int64", + "title": "age in seconds" + } + }, + "title": "Ban list entry" + }, + "P2PMetricsDatabaseStats": { + "type": "object", + "properties": { + "p2pDbSizeMb": { + "type": "number", + "format": "double" + }, + "p2pDbRecordsCount": { + "type": "string", + "format": "int64" + } + }, + "title": "DB stats" + }, + "P2PMetricsDhtMetrics": { + "type": "object", + "properties": { + "storeSuccessRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsStoreSuccessPoint" + } + }, + "batchRetrieveRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsBatchRetrievePoint" + } + }, + "hotPathBannedSkips": { + "type": "string", + "format": "int64", + "title": "counter" + }, + "hotPathBanIncrements": { + "type": "string", + "format": "int64", + "title": "counter" + } + }, + "title": "Rolling DHT metrics snapshot" + }, + "P2PMetricsDiskStatus": { + "type": "object", + "properties": { + "allMb": { + "type": "number", + "format": "double" + }, + "usedMb": { + "type": "number", + "format": "double" + }, + "freeMb": { + "type": "number", + "format": "double" + } + }, + "title": "Disk status" + }, + "P2PMetricsHandleCounters": { + "type": "object", + "properties": { + "total": { + "type": "string", + "format": "int64" + }, + "success": { + "type": "string", + "format": "int64" + }, + "failure": { + "type": "string", + "format": "int64" + }, + "timeout": { + "type": "string", + "format": "int64" + } + }, + "title": "Per-handler counters from network layer" + }, + "ResourcesCPU": { + "type": "object", + "properties": { + "usagePercent": { + "type": "number", + "format": "double", + "title": "CPU usage percentage (0-100)" + }, + "cores": { + "type": "integer", + "format": "int32", + "title": 
"Number of CPU cores" + } + } + }, + "ResourcesMemory": { + "type": "object", + "properties": { + "totalGb": { + "type": "number", + "format": "double", + "title": "Total memory in GB" + }, + "usedGb": { + "type": "number", + "format": "double", + "title": "Used memory in GB" + }, + "availableGb": { + "type": "number", + "format": "double", + "title": "Available memory in GB" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Memory usage percentage (0-100)" + } + } + }, + "ResourcesStorage": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Storage path being monitored" + }, + "totalBytes": { + "type": "string", + "format": "uint64" + }, + "usedBytes": { + "type": "string", + "format": "uint64" + }, + "availableBytes": { + "type": "string", + "format": "uint64" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Storage usage percentage (0-100)" + } + } + }, + "StatusResponseNetwork": { + "type": "object", + "properties": { + "peersCount": { + "type": "integer", + "format": "int32", + "title": "Number of connected peers in P2P network" + }, + "peerAddresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "List of connected peer addresses (optional, may be empty for privacy)" + } + }, + "title": "Network information" + }, + "StatusResponseP2PMetrics": { + "type": "object", + "properties": { + "dhtMetrics": { + "$ref": "#/definitions/P2PMetricsDhtMetrics" + }, + "networkHandleMetrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsHandleCounters" + } + }, + "connPoolMetrics": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "banList": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsBanEntry" + } + }, + "database": { + "$ref": "#/definitions/P2PMetricsDatabaseStats" + }, + "disk": { + "$ref": 
"#/definitions/P2PMetricsDiskStatus" + } + }, + "title": "P2P metrics and diagnostics (additive field)" + }, + "StatusResponseResources": { + "type": "object", + "properties": { + "cpu": { + "$ref": "#/definitions/ResourcesCPU" + }, + "memory": { + "$ref": "#/definitions/ResourcesMemory" + }, + "storageVolumes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ResourcesStorage" + } + }, + "hardwareSummary": { + "type": "string", + "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" + } + }, + "title": "System resource information" + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "supernodeListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/supernodeServiceInfo" + } + }, + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, + "supernodeServiceInfo": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "methods": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "supernodeStatusResponse": { + "type": "object", + "properties": { + "version": { + "type": "string", + "title": "Supernode version" + }, + "uptimeSeconds": { + "type": "string", + "format": "uint64", + "title": "Uptime in seconds" + }, + "resources": { + "$ref": "#/definitions/StatusResponseResources" + }, + "registeredServices": { + "type": 
"array", + "items": { + "type": "string" + }, + "title": "All registered/available services" + }, + "network": { + "$ref": "#/definitions/StatusResponseNetwork", + "title": "P2P network information" + }, + "rank": { + "type": "integer", + "format": "int32", + "title": "Rank in the top supernodes list (0 if not in top list)" + }, + "ipAddress": { + "type": "string", + "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" + }, + "p2pMetrics": { + "$ref": "#/definitions/StatusResponseP2PMetrics" + } + }, + "title": "The StatusResponse represents system status with clear organization" + } + } +} diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go new file mode 100644 index 00000000..96237b98 --- /dev/null +++ b/supernode/transport/grpc/cascade/handler.go @@ -0,0 +1,356 @@ +package cascade + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + "os" + "path/filepath" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + tasks "github.com/LumeraProtocol/supernode/v2/pkg/task" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" + "lukechampine.com/blake3" +) + +type ActionServer struct { + pb.UnimplementedCascadeServiceServer + factory cascadeService.CascadeServiceFactory + tracker tasks.Tracker + uploadTimeout time.Duration + downloadTimeout time.Duration +} + +const ( + serviceCascadeUpload = "cascade.upload" + serviceCascadeDownload = "cascade.download" +) + +// NewCascadeActionServer creates a new CascadeActionServer with injected service and tracker +func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory, tracker tasks.Tracker, uploadTO, downloadTO time.Duration) *ActionServer { + if uploadTO <= 0 { + uploadTO = 30 * time.Minute + } + if downloadTO <= 0 { + downloadTO = 30 * time.Minute + } + return 
&ActionServer{factory: factory, tracker: tracker, uploadTimeout: uploadTO, downloadTimeout: downloadTO}
}

// calculateOptimalChunkSize returns an optimal chunk size based on file size
// to balance throughput and memory usage: 64 KB for files up to 1 MB, 256 KB
// up to 50 MB, 1 MB up to 500 MB, and 4 MB beyond that.
func calculateOptimalChunkSize(fileSize int64) int {
	const (
		minChunkSize        = 64 * 1024         // 64 KB minimum
		maxChunkSize        = 4 * 1024 * 1024   // 4 MB maximum for 1GB+ files
		smallFileThreshold  = 1024 * 1024       // 1 MB
		mediumFileThreshold = 50 * 1024 * 1024  // 50 MB
		largeFileThreshold  = 500 * 1024 * 1024 // 500 MB
	)

	var chunkSize int
	switch {
	case fileSize <= smallFileThreshold:
		chunkSize = minChunkSize
	case fileSize <= mediumFileThreshold:
		chunkSize = 256 * 1024
	case fileSize <= largeFileThreshold:
		chunkSize = 1024 * 1024
	default:
		chunkSize = maxChunkSize
	}

	// Defensive clamp; the switch above already keeps the value in range.
	if chunkSize < minChunkSize {
		chunkSize = minChunkSize
	}
	if chunkSize > maxChunkSize {
		chunkSize = maxChunkSize
	}
	return chunkSize
}

// Register receives a client-streamed upload (data chunks followed by / mixed
// with metadata), spools it to a temp file while hashing with blake3, then
// hands the file off to a cascade registration task and streams task events
// back to the client. Uploads are capped at 1GB.
func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error {
	fields := logtrace.Fields{
		logtrace.FieldMethod: "Register",
		logtrace.FieldModule: "CascadeActionServer",
	}

	ctx := stream.Context()
	logtrace.Info(ctx, "register: stream open", fields)

	const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit

	var (
		metadata  *pb.Metadata
		totalSize int
	)

	// Per-stream task-tracking state. BUG FIX: these were previously declared
	// at package level, sharing them across all concurrent Register streams —
	// a data race, and only the first stream would ever start live tracking
	// (later streams saw startedTask already true and clobbered handle).
	var (
		startedTask bool
		handle      *tasks.Handle
	)

	hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile()
	if err != nil {
		fields[logtrace.FieldError] = err.Error()
		logtrace.Error(ctx, "failed to initialize hasher and temp file", fields)
		return fmt.Errorf("initializing hasher and temp file: %w", err)
	}
	defer func(tempFile *os.File) {
		// Best-effort close; the file may already be closed by the rename step.
		err := tempFile.Close()
		if err != nil && !errors.Is(err, os.ErrClosed) {
			fields[logtrace.FieldError] = err.Error()
			logtrace.Warn(ctx, "error closing temp file", fields)
		}
	}(tempFile)

	for {
		req, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			fields[logtrace.FieldError] = err.Error()
			logtrace.Error(ctx, "error receiving stream data", fields)
			return fmt.Errorf("failed to receive stream data: %w", err)
		}

		switch x := req.RequestType.(type) {
		case *pb.RegisterRequest_Chunk:
			if x.Chunk != nil {
				if _, err := hasher.Write(x.Chunk.Data); err != nil {
					fields[logtrace.FieldError] = err.Error()
					logtrace.Error(ctx, "failed to write chunk to hasher", fields)
					return fmt.Errorf("hashing error: %w", err)
				}
				if _, err := tempFile.Write(x.Chunk.Data); err != nil {
					fields[logtrace.FieldError] = err.Error()
					logtrace.Error(ctx, "failed to write chunk to file", fields)
					return fmt.Errorf("file write error: %w", err)
				}
				totalSize += len(x.Chunk.Data)
				if totalSize > maxFileSize {
					fields[logtrace.FieldError] = "file size exceeds 1GB limit"
					fields["total_size"] = totalSize
					logtrace.Error(ctx, "upload rejected: file too large", fields)
					return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize)
				}
				// Keep chunk logs at debug to avoid verbosity
				logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize})
			}
		case *pb.RegisterRequest_Metadata:
			metadata = x.Metadata
			// Set correlation ID for the rest of the flow
			ctx = logtrace.CtxWithCorrelationID(ctx, metadata.ActionId)
			fields[logtrace.FieldTaskID] = metadata.GetTaskId()
			fields[logtrace.FieldActionID] = metadata.GetActionId()
			logtrace.Info(ctx, "register: metadata received", fields)
			// Start live task tracking on first metadata (covers remaining
			// stream and processing). The defer runs once at function return.
			if !startedTask {
				startedTask = true
				handle = tasks.StartWith(server.tracker, ctx, serviceCascadeUpload, metadata.ActionId, server.uploadTimeout)
				defer handle.End(ctx)
			}
		}
	}

	if metadata == nil {
		logtrace.Error(ctx, "no metadata received in stream", fields)
		return fmt.Errorf("no metadata received")
	}
	fields[logtrace.FieldTaskID] = metadata.GetTaskId()
	fields[logtrace.FieldActionID] = metadata.GetActionId()
	logtrace.Info(ctx, "register: stream upload complete", fields)

	// Flush spooled bytes to disk before hashing/rename.
	if err := tempFile.Sync(); err != nil {
		fields[logtrace.FieldError] = err.Error()
		logtrace.Error(ctx, "failed to sync temp file", fields)
		return fmt.Errorf("failed to sync temp file: %w", err)
	}

	// Renamed from `hash` to avoid shadowing the imported hash package.
	digest := hasher.Sum(nil)
	hashHex := hex.EncodeToString(digest)
	fields[logtrace.FieldHashHex] = hashHex
	logtrace.Info(ctx, "register: hash computed", fields)

	targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile)
	if err != nil {
		fields[logtrace.FieldError] = err.Error()
		logtrace.Error(ctx, "failed to replace temp dir with task dir", fields)
		return fmt.Errorf("failed to replace temp dir with task dir: %w", err)
	}

	task := server.factory.NewCascadeRegistrationTask()
	logtrace.Info(ctx, "register: task start", fields)
	err = task.Register(ctx, &cascadeService.RegisterRequest{
		TaskID:   metadata.TaskId,
		ActionID: metadata.ActionId,
		DataHash: digest,
		DataSize: totalSize,
		FilePath: targetPath,
	}, func(resp *cascadeService.RegisterResponse) error {
		grpcResp := &pb.RegisterResponse{
			EventType: pb.SupernodeEventType(resp.EventType),
			Message:   resp.Message,
			TxHash:    resp.TxHash,
		}
		if err := stream.Send(grpcResp); err != nil {
			logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()})
			return err
		}
		// Mirror event to Info logs for high-level tracing
		logtrace.Info(ctx, "register: event", logtrace.Fields{"event_type": resp.EventType, "message": resp.Message, logtrace.FieldTxHash: resp.TxHash, logtrace.FieldActionID: metadata.ActionId, logtrace.FieldTaskID: metadata.TaskId})
		return nil
	})
	if err != nil {
		logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()})
		return fmt.Errorf("registration failed: %w", err)
	}
logtrace.Info(ctx, "register: task ok", fields) + return nil +} + +func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { + ctx := stream.Context() + fields := logtrace.Fields{ + logtrace.FieldMethod: "Download", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldActionID: req.GetActionId(), + } + logtrace.Debug(ctx, "download request received", fields) + + // Start live task tracking for the entire download RPC (including file streaming) + dlHandle := tasks.StartWith(server.tracker, ctx, serviceCascadeDownload, req.GetActionId(), server.downloadTimeout) + defer dlHandle.End(ctx) + + // Prepare to capture decoded file path from task events + var decodedFilePath string + var tmpDir string + + task := server.factory.NewCascadeRegistrationTask() + // Run cascade task Download; stream events back to client + err := task.Download(ctx, &cascadeService.DownloadRequest{ActionID: req.GetActionId(), Signature: req.GetSignature()}, func(resp *cascadeService.DownloadResponse) error { + // Forward event to gRPC client + evt := &pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Event{ + Event: &pb.DownloadEvent{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + }, + }, + } + if sendErr := stream.Send(evt); sendErr != nil { + return sendErr + } + // Capture decode-completed info for streaming + if resp.EventType == cascadeService.SupernodeEventTypeDecodeCompleted { + decodedFilePath = resp.FilePath + tmpDir = resp.DownloadedDir + } + return nil + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "download task failed", fields) + return fmt.Errorf("download task failed: %w", err) + } + + if decodedFilePath == "" { + logtrace.Warn(ctx, "decode completed without file path", fields) + return nil + } + + // Notify client that server is ready to stream the file + logtrace.Debug(ctx, "download: serve ready", logtrace.Fields{"event_type": 
cascadeService.SupernodeEventTypeServeReady, logtrace.FieldActionID: req.GetActionId()}) + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { + return fmt.Errorf("send serve-ready: %w", err) + } + + // Stream file content in chunks + fi, err := os.Stat(decodedFilePath) + if err != nil { + return fmt.Errorf("stat decoded file: %w", err) + } + chunkSize := calculateOptimalChunkSize(fi.Size()) + f, err := os.Open(decodedFilePath) + if err != nil { + return fmt.Errorf("open decoded file: %w", err) + } + defer f.Close() + + buf := make([]byte, chunkSize) + for { + n, rerr := f.Read(buf) + if n > 0 { + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Chunk{Chunk: &pb.DataChunk{Data: append([]byte(nil), buf[:n]...)}}}); err != nil { + return fmt.Errorf("send chunk: %w", err) + } + } + if rerr == io.EOF { + break + } + if rerr != nil { + return fmt.Errorf("read decoded file: %w", rerr) + } + } + + // Cleanup temp directory if provided + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + + logtrace.Debug(ctx, "download stream completed", fields) + return nil +} + +// initializeHasherAndTempFile prepares a hasher and a temporary file to stream upload data into. 
+func initializeHasherAndTempFile() (hash.Hash, *os.File, string, error) { + // Create a temp directory for the upload + tmpDir, err := os.MkdirTemp("", "supernode-upload-*") + if err != nil { + return nil, nil, "", fmt.Errorf("create temp dir: %w", err) + } + + // Create a file within the temp directory + filePath := filepath.Join(tmpDir, "data.bin") + f, err := os.Create(filePath) + if err != nil { + return nil, nil, "", fmt.Errorf("create temp file: %w", err) + } + + // Create a BLAKE3 hasher (32 bytes output) + hasher := blake3.New(32, nil) + return hasher, f, filePath, nil +} + +// replaceTempDirWithTaskDir moves the uploaded file into a task-scoped directory +// and returns the new absolute path. +func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (string, error) { + // Ensure data is flushed + _ = tempFile.Sync() + // Close now; deferred close may run later and is safe to ignore + _ = tempFile.Close() + + // Create a stable target directory under OS temp + targetDir := filepath.Join(os.TempDir(), "supernode", "uploads", taskID) + if err := os.MkdirAll(targetDir, 0700); err != nil { + return "", fmt.Errorf("create task dir: %w", err) + } + + newPath := filepath.Join(targetDir, filepath.Base(tempFilePath)) + if err := os.Rename(tempFilePath, newPath); err != nil { + return "", fmt.Errorf("move uploaded file: %w", err) + } + + // Attempt to cleanup the original temp directory + _ = os.RemoveAll(filepath.Dir(tempFilePath)) + return newPath, nil +} diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go new file mode 100644 index 00000000..e543e7b1 --- /dev/null +++ b/supernode/transport/grpc/status/handler.go @@ -0,0 +1,59 @@ +package server + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" +) + +// 
SupernodeServer implements the SupernodeService gRPC service +type SupernodeServer struct { + pb.UnimplementedSupernodeServiceServer + statusService *statussvc.SupernodeStatusService + gatewayPort int +} + +// NewSupernodeServer creates a new SupernodeServer +func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *SupernodeServer { + return &SupernodeServer{statusService: statusService, gatewayPort: 8002} +} + +// SetGatewayPort sets the gateway port for internal proxy requests +func (s *SupernodeServer) SetGatewayPort(port int) { + s.gatewayPort = port +} + +// GetStatus implements SupernodeService.GetStatus +func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { + return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) +} + +// ListServices implements SupernodeService.ListServices +func (s *SupernodeServer) ListServices(ctx context.Context, _ *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { + // Describe available services and methods/streams exposed by this node + var services []*pb.ServiceInfo + + // SupernodeService methods + var supernodeMethods []string + for _, m := range pb.SupernodeService_ServiceDesc.Methods { + supernodeMethods = append(supernodeMethods, m.MethodName) + } + services = append(services, &pb.ServiceInfo{ + Name: pb.SupernodeService_ServiceDesc.ServiceName, + Methods: supernodeMethods, + }) + + // CascadeService streams (surface stream names as methods for discovery) + var cascadeMethods []string + for _, st := range pbcascade.CascadeService_ServiceDesc.Streams { + cascadeMethods = append(cascadeMethods, st.StreamName) + } + services = append(services, &pb.ServiceInfo{ + Name: pbcascade.CascadeService_ServiceDesc.ServiceName, + Methods: cascadeMethods, + }) + + return &pb.ListServicesResponse{Services: services, Count: int32(len(services))}, nil +} diff --git a/supernode/transport/grpc/status/pprof_handlers.go 
b/supernode/transport/grpc/status/pprof_handlers.go new file mode 100644 index 00000000..00be8b99 --- /dev/null +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -0,0 +1,252 @@ +package server + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "strings" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" +) + +// isPprofEnabled checks if pprof should be enabled based on chain ID or environment variable +func (s *SupernodeServer) isPprofEnabled() bool { + // Check if chain ID contains testnet + if s.statusService != nil && s.statusService.GetChainID() != "" { + if strings.Contains(strings.ToLower(s.statusService.GetChainID()), "testnet") { + return true + } + } + + // Check environment variable + return os.Getenv("ENABLE_PPROF") == "true" +} + +// Raw pprof handlers - these proxy to the actual pprof HTTP endpoints + +// pprofProxy makes an internal HTTP request to the actual pprof endpoint +func (s *SupernodeServer) pprofProxy(path string, queryParams string) ([]byte, error) { + // Determine the port - use gateway port if available, otherwise use default + port := 8002 // Default gateway port + if s.gatewayPort != 0 { + port = s.gatewayPort + } + + // Construct the URL + url := fmt.Sprintf("http://localhost:%d/debug/pprof%s", port, path) + if queryParams != "" { + url += "?" 
+ queryParams + } + + // Make the HTTP request + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Read the response body + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return data, nil +} + +// GetRawPprof returns the pprof index +func (s *SupernodeServer) GetRawPprof(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte("Profiling disabled")}, nil + } + + data, err := s.pprofProxy("/", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte(fmt.Sprintf("Error: %v", err))}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofHeap returns raw heap profile +func (s *SupernodeServer) GetRawPprofHeap(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/heap", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofGoroutine returns raw goroutine profile +func (s *SupernodeServer) GetRawPprofGoroutine(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/goroutine", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofAllocs returns raw allocations profile +func (s *SupernodeServer) GetRawPprofAllocs(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + 
if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/allocs", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofBlock returns raw block profile +func (s *SupernodeServer) GetRawPprofBlock(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/block", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofMutex returns raw mutex profile +func (s *SupernodeServer) GetRawPprofMutex(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/mutex", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofThreadcreate returns raw threadcreate profile +func (s *SupernodeServer) GetRawPprofThreadcreate(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/threadcreate", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: 
data}, nil +} + +// GetRawPprofProfile returns raw CPU profile +func (s *SupernodeServer) GetRawPprofProfile(ctx context.Context, req *pb.RawPprofCpuRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + seconds := req.GetSeconds() + if seconds <= 0 { + seconds = 30 + } + if seconds > 300 { + seconds = 300 + } + + queryParams := fmt.Sprintf("seconds=%d", seconds) + data, err := s.pprofProxy("/profile", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofCmdline returns the command line +func (s *SupernodeServer) GetRawPprofCmdline(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + data, err := s.pprofProxy("/cmdline", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofSymbol returns symbol information +func (s *SupernodeServer) GetRawPprofSymbol(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + data, err := s.pprofProxy("/symbol", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofTrace returns execution trace +func (s *SupernodeServer) GetRawPprofTrace(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + // Trace typically takes a seconds parameter + queryParams := "seconds=1" + data, err := s.pprofProxy("/trace", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} diff --git 
a/supernode/node/supernode/server/server.go b/supernode/transport/grpc/status/server.go similarity index 75% rename from supernode/node/supernode/server/server.go rename to supernode/transport/grpc/status/server.go index 774be094..b7938983 100644 --- a/supernode/node/supernode/server/server.go +++ b/supernode/transport/grpc/status/server.go @@ -7,7 +7,6 @@ import ( "strconv" "strings" - "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" @@ -22,14 +21,12 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" ) -type service interface { - Desc() *grpc.ServiceDesc -} - // Server represents supernode server type Server struct { - config *Config - services []service + identity string + listenAddrs string + port int + services []grpcserver.ServiceDesc name string kr keyring.Keyring grpcServer *grpcserver.Server @@ -48,12 +45,12 @@ func (server *Server) Run(ctx context.Context) error { // Set up gRPC logging logtrace.SetGRPCLogger() - logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity}) - logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses}) + logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.identity}) + logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.listenAddrs}) group, ctx := errgroup.WithContext(ctx) - addresses := strings.Split(server.config.ListenAddresses, ",") + addresses := strings.Split(server.listenAddrs, ",") if err := server.setupGRPCServer(); err != nil { logtrace.Fatal(ctx, "Failed to setup gRPC server", logtrace.Fields{logtrace.FieldModule: "server", logtrace.FieldError: err.Error()}) } @@ -70,7 +67,7 @@ func (server *Server) Run(ctx context.Context) error { opts.WriteBufferSize = (8 * 1024 * 1024) // 8MB TCP 
buffer for _, address := range addresses { - addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.port)) address := addr // Create a new variable to avoid closure issues group.Go(func() error { @@ -87,7 +84,7 @@ func (server *Server) setupGRPCServer() error { serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ CommonOptions: ltc.CommonOptions{ Keyring: server.kr, - LocalIdentity: server.config.Identity, + LocalIdentity: server.identity, PeerType: securekeyx.Supernode, Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), }, @@ -107,29 +104,13 @@ func (server *Server) setupGRPCServer() error { server.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) // Register all services - var supernodeServer *SupernodeServer - for _, service := range server.services { - server.grpcServer.RegisterService(service.Desc(), service) - server.healthServer.SetServingStatus(service.Desc().ServiceName, healthpb.HealthCheckResponse_SERVING) - - // Keep reference to SupernodeServer - if ss, ok := service.(*SupernodeServer); ok { - supernodeServer = ss + for _, s := range server.services { + server.grpcServer.RegisterService(s.Desc, s.Service) + if s.Desc != nil { + server.healthServer.SetServingStatus(s.Desc.ServiceName, healthpb.HealthCheckResponse_SERVING) } } - // After all services are registered, update SupernodeServer with the list - if supernodeServer != nil { - // Register all custom services - for _, svc := range server.services { - supernodeServer.RegisterService(svc.Desc().ServiceName, svc.Desc()) - } - - // Also register the health service - healthDesc := healthpb.Health_ServiceDesc - supernodeServer.RegisterService(healthDesc.ServiceName, &healthDesc) - } - return nil } @@ -146,7 +127,10 @@ func (server *Server) Close() { // Set all services to NOT_SERVING before shutdown server.healthServer.SetServingStatus("", 
healthpb.HealthCheckResponse_NOT_SERVING) for _, service := range server.services { - serviceName := service.Desc().ServiceName + serviceName := "" + if service.Desc != nil { + serviceName = service.Desc.ServiceName + } server.healthServer.SetServingStatus(serviceName, healthpb.HealthCheckResponse_NOT_SERVING) } } @@ -158,13 +142,14 @@ func (server *Server) Close() { } // New returns a new Server instance. -func New(config *Config, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...service) (*Server, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") +func New(identity, listenAddrs string, port int, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...grpcserver.ServiceDesc) (*Server, error) { + if listenAddrs == "" { + return nil, fmt.Errorf("listen addresses cannot be empty") } - return &Server{ - config: config, + identity: identity, + listenAddrs: listenAddrs, + port: port, services: services, name: name, kr: kr, diff --git a/supernode/verifier/interface.go b/supernode/verifier/interface.go new file mode 100644 index 00000000..d2668c9c --- /dev/null +++ b/supernode/verifier/interface.go @@ -0,0 +1,35 @@ +package verifier + +import "context" + +// ConfigVerifierService defines verification methods +type ConfigVerifierService interface { + VerifyConfig(ctx context.Context) (*VerificationResult, error) +} + +// ConfigError represents a config validation error or warning +type ConfigError struct { + Field string + Expected string + Actual string + Message string +} + +// VerificationResult holds the outcome of config verification +type VerificationResult struct { + Valid bool + Errors []ConfigError + Warnings []ConfigError +} + +func (r *VerificationResult) IsValid() bool { return r.Valid && len(r.Errors) == 0 } +func (r *VerificationResult) HasWarnings() bool { return len(r.Warnings) > 0 } +func (r *VerificationResult) Summary() string { + if !r.IsValid() { + return "invalid: check errors" + } + if 
r.HasWarnings() { + return "valid with warnings" + } + return "valid" +} diff --git a/supernode/verifier/verifier.go b/supernode/verifier/verifier.go new file mode 100644 index 00000000..4875fe99 --- /dev/null +++ b/supernode/verifier/verifier.go @@ -0,0 +1,129 @@ +package verifier + +import ( + "context" + "fmt" + "net" + + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ConfigVerifier struct { + config *config.Config + lumeraClient lumera.Client + keyring keyring.Keyring +} + +func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { + return &ConfigVerifier{config: cfg, lumeraClient: client, keyring: kr} +} + +func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { + result := &VerificationResult{Valid: true, Errors: []ConfigError{}, Warnings: []ConfigError{}} + logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{"identity": cv.config.SupernodeConfig.Identity, "key_name": cv.config.SupernodeConfig.KeyName, "p2p_port": cv.config.P2PConfig.Port}) + if err := cv.checkKeyExists(result); err != nil { + return result, err + } + if err := cv.checkIdentityMatches(result); err != nil { + return result, err + } + if !result.IsValid() { + return result, nil + } + supernodeInfo, err := cv.checkSupernodeExists(ctx, result) + if err != nil { + return result, err + } + if supernodeInfo == nil { + return result, nil + } + cv.checkSupernodeState(result, supernodeInfo) + cv.checkPortsAvailable(result) + logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{"valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings)}) + return result, nil 
+} + +func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { + _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "key_name", Actual: cv.config.SupernodeConfig.KeyName, Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName)}) + } + return nil +} + +func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { + keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + return nil + } + pubKey, err := keyInfo.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) + } + addr := sdk.AccAddress(pubKey.Address()) + if addr.String() != cv.config.SupernodeConfig.Identity { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "identity", Expected: addr.String(), Actual: cv.config.SupernodeConfig.Identity, Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity)}) + } + return nil +} + +func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { + sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "registration", Actual: "not_registered", Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity)}) + return nil, nil + } + return sn, nil +} + +func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) + chainPort := supernodeInfo.P2PPort + if chainPort != "" && 
chainPort != configPort { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Expected: chainPort, Actual: configPort, Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort)}) + } +} + +func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "state", Expected: "SUPERNODE_STATE_ACTIVE", Actual: supernodeInfo.CurrentState, Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState)}) + } +} + +func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "supernode_port", Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port)}) + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port)}) + } + gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) + if gatewayPort == 0 { + gatewayPort = 8002 + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "gateway_port", Actual: fmt.Sprintf("%d", gatewayPort), Message: fmt.Sprintf("Port %d is already in use. 
Please stop the conflicting service or choose a different port", gatewayPort)}) + } +} + +func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { + ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return false + } + _ = ln.Close() + return true +} diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 2db7ad09..b9af06d2 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -297,7 +297,7 @@ func TestCascadeE2E(t *testing.T) { t.Logf("Requesting cascade action with metadata: %s", metadata) t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, autoPrice, expirationTime) - response, err := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) + response, _ := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) txresp := response.TxResponse @@ -356,26 +356,38 @@ func TestCascadeE2E(t *testing.T) { // Step 9: Subscribe to all events and extract tx hash // --------------------------------------- - // Channel to receive the transaction hash - txHashCh := make(chan string, 1) - completionCh := make(chan bool, 1) - - // Subscribe to ALL events - err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { - // Only capture TxhasReceived events - if e.Type == event.SDKTaskTxHashReceived { - if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { - // Send the hash to our channel - txHashCh <- txHash - } - } - - // Also monitor for task completion - if e.Type == event.SDKTaskCompleted { - completionCh <- true - } - }) - require.NoError(t, err, "Failed to subscribe to events") + // Channels to receive async signals + txHashCh := make(chan string, 1) + completionCh := make(chan bool, 1) + errCh := make(chan string, 1) + + // Subscribe to ALL events (non-blocking sends to avoid handler stalls) + err = 
actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) {
+	// Log every event for debugging and capture key ones
+	t.Logf("SDK event: type=%s data=%v", e.Type, e.Data)
+	// Only capture TxHashReceived events
+	if e.Type == event.SDKTaskTxHashReceived {
+		if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" {
+			// Non-blocking send; drop if buffer full
+			select { case txHashCh <- txHash: default: }
+		}
+	}
+
+	// Also monitor for task completion
+	if e.Type == event.SDKTaskCompleted {
+		// Non-blocking send; drop if buffer full
+		select { case completionCh <- true: default: }
+	}
+	// Capture task failures and propagate error message to main goroutine
+	if e.Type == event.SDKTaskFailed {
+		if msg, ok := e.Data[event.KeyError].(string); ok && msg != "" {
+			select { case errCh <- msg: default: }
+		} else {
+			select { case errCh <- "task failed (no error message)" : default: }
+		}
+	}
+})
+require.NoError(t, err, "Failed to subscribe to events")

 // Start cascade operation
@@ -390,8 +402,26 @@
 require.NoError(t, err, "Failed to start cascade operation")
 t.Logf("Cascade operation started with task ID: %s", taskID)
-	recievedhash := <-txHashCh
-	<-completionCh
+	// Wait for both tx-hash and completion with a timeout
+	var recievedhash string
+	done := false
+	timeout := time.After(2 * time.Minute)
+waitLoop:
+	for {
+		if recievedhash != "" && done {
+			break waitLoop
+		}
+		select {
+		case h := <-txHashCh:
+			if recievedhash == "" { recievedhash = h }
+		case <-completionCh:
+			done = true
+		case emsg := <-errCh:
+			t.Fatalf("cascade task reported failure: %s", emsg)
+		case <-timeout:
+			t.Fatalf("timeout waiting for events; recievedhash=%q done=%v", recievedhash, done)
+		}
+	}

 t.Logf("Received transaction hash: %s", recievedhash)
diff --git a/tests/system/go.mod b/tests/system/go.mod
index 8e1d8840..052a1b76 100644
--- a/tests/system/go.mod
+++ b/tests/system/go.mod
@@ -95,7 +95,6 @@
require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect @@ -105,6 +104,7 @@ require ( github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -119,7 +119,6 @@ require ( github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -129,8 +128,6 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/highwayhash v1.0.3 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index d00c5807..1ac6ecda 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -427,6 +427,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway 
v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -505,7 +507,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -579,11 +580,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= @@ -806,7 +805,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= @@ -892,7 +890,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 
h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -938,7 +935,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1031,7 +1027,6 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=