From 1269fcdf4215dea6c21c7495444a23fae540c78d Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 3 Oct 2025 19:03:52 +0500 Subject: [PATCH 01/36] Clean up task from supernode and sn manager --- README.md | 7 - cmd/sncli/cli/cmd_get_status.go | 7 - gen/supernode/service.swagger.json | 27 - gen/supernode/status.pb.go | 593 ++++++++---------- proto/supernode/status.proto | 11 +- sn-manager/internal/updater/updater.go | 110 +--- supernode/cmd/start.go | 1 - supernode/node/supernode/gateway/swagger.json | 39 +- .../node/supernode/server/status_server.go | 19 +- .../supernode/server/status_server_test.go | 50 +- supernode/services/cascade/service.go | 33 +- supernode/services/cascade/status.go | 5 +- supernode/services/cascade/status_test.go | 59 +- .../services/common/supernode/service.go | 60 +- .../services/common/supernode/service_test.go | 104 +-- supernode/services/common/supernode/types.go | 48 +- supernode/services/common/test_helpers.go | 15 - 17 files changed, 337 insertions(+), 851 deletions(-) delete mode 100644 supernode/services/common/test_helpers.go diff --git a/README.md b/README.md index 6e152ba3..4b4da332 100644 --- a/README.md +++ b/README.md @@ -54,19 +54,12 @@ message StatusResponse { string hardware_summary = 4; // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } - message ServiceTasks { - string service_name = 1; - repeated string task_ids = 2; - int32 task_count = 3; - } - message Network { int32 peers_count = 1; // Number of connected peers in P2P network repeated string peer_addresses = 2; // List of connected peer addresses (format: "ID@IP:Port") } Resources resources = 3; - repeated ServiceTasks running_tasks = 4; // Services with currently running tasks repeated string registered_services = 5; // All registered/available services Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) diff --git a/cmd/sncli/cli/cmd_get_status.go b/cmd/sncli/cli/cmd_get_status.go index 9603089b..bb181674 100644 --- a/cmd/sncli/cli/cmd_get_status.go +++ b/cmd/sncli/cli/cmd_get_status.go @@ -20,13 +20,6 @@ func (c *CLI) GetSupernodeStatus() error { fmt.Printf(" Memory: %.2fGB used / %.2fGB total (%.2f%%)\n", resp.Resources.Memory.UsedGB, resp.Resources.Memory.TotalGB, resp.Resources.Memory.UsagePercent) - if len(resp.RunningTasks) > 0 { - fmt.Println(" Running Tasks:") - for _, service := range resp.RunningTasks { - fmt.Printf(" - %s (Tasks: %d)\n", service.ServiceName, service.TaskCount) - } - } - if len(resp.RegisteredServices) > 0 { fmt.Println(" Registered Services:") for _, svc := range resp.RegisteredServices { diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 08140033..0b515a20 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -391,25 +391,6 @@ }, "title": "System resource information" }, - "StatusResponseServiceTasks": { - "type": "object", - "properties": { - "serviceName": { - "type": "string" - }, - "taskIds": { - "type": "array", - "items": { - "type": "string" - } - }, - "taskCount": { - "type": "integer", - "format": "int32" - } - }, - "title": "ServiceTasks contains task information for a specific service" - }, "protobufAny": { "type": "object", "properties": { @@ -483,14 +464,6 @@ "resources": { "$ref": "#/definitions/StatusResponseResources" }, - "runningTasks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/StatusResponseServiceTasks" - }, - "title": "Services with 
currently running tasks" - }, "registeredServices": { "type": "array", "items": { diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index 52045726..f3182114 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -74,15 +74,14 @@ type StatusResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version - UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds - Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` - RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks - RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services - Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information - Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) - IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version + UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds + Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services + Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information + Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) + IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` } func (x *StatusResponse) Reset() { @@ -136,13 +135,6 @@ func (x *StatusResponse) GetResources() *StatusResponse_Resources { return nil } -func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { - if x != nil { - return x.RunningTasks - } - return nil -} - func (x *StatusResponse) GetRegisteredServices() []string { if x != nil { return x.RegisteredServices @@ -248,68 +240,6 @@ func (x *StatusResponse_Resources) GetHardwareSummary() string { return "" } -// ServiceTasks contains task information for a specific service -type StatusResponse_ServiceTasks struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceName string 
`protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` - TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` -} - -func (x *StatusResponse_ServiceTasks) Reset() { - *x = StatusResponse_ServiceTasks{} - mi := &file_supernode_status_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_ServiceTasks) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_ServiceTasks) ProtoMessage() {} - -func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. -func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} -} - -func (x *StatusResponse_ServiceTasks) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { - if x != nil { - return x.TaskIds - } - return nil -} - -func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { - if x != nil { - return x.TaskCount - } - return 0 -} - // Network information type StatusResponse_Network struct { state protoimpl.MessageState @@ -322,7 +252,7 @@ type StatusResponse_Network struct { func (x *StatusResponse_Network) Reset() { *x = StatusResponse_Network{} - mi := &file_supernode_status_proto_msgTypes[4] + mi := &file_supernode_status_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -334,7 +264,7 @@ func (x *StatusResponse_Network) String() string { func (*StatusResponse_Network) ProtoMessage() {} func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[4] + mi := &file_supernode_status_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -347,7 +277,7 @@ func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. 
func (*StatusResponse_Network) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} } func (x *StatusResponse_Network) GetPeersCount() int32 { @@ -380,7 +310,7 @@ type StatusResponse_P2PMetrics struct { func (x *StatusResponse_P2PMetrics) Reset() { *x = StatusResponse_P2PMetrics{} - mi := &file_supernode_status_proto_msgTypes[5] + mi := &file_supernode_status_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -392,7 +322,7 @@ func (x *StatusResponse_P2PMetrics) String() string { func (*StatusResponse_P2PMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[5] + mi := &file_supernode_status_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -405,7 +335,7 @@ func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} } func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { @@ -461,7 +391,7 @@ type StatusResponse_Resources_CPU struct { func (x *StatusResponse_Resources_CPU) Reset() { *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_status_proto_msgTypes[6] + mi := &file_supernode_status_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -473,7 +403,7 @@ func (x *StatusResponse_Resources_CPU) String() string { func (*StatusResponse_Resources_CPU) ProtoMessage() {} func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[6] + mi := &file_supernode_status_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -516,7 +446,7 @@ type StatusResponse_Resources_Memory struct { func (x *StatusResponse_Resources_Memory) Reset() { *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_status_proto_msgTypes[7] + mi := &file_supernode_status_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -528,7 +458,7 @@ func (x *StatusResponse_Resources_Memory) String() string { func (*StatusResponse_Resources_Memory) ProtoMessage() {} func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[7] + mi := &file_supernode_status_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -586,7 +516,7 @@ type StatusResponse_Resources_Storage struct { func (x *StatusResponse_Resources_Storage) Reset() { *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_status_proto_msgTypes[8] + mi := &file_supernode_status_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -598,7 +528,7 @@ func (x *StatusResponse_Resources_Storage) String() string { func (*StatusResponse_Resources_Storage) ProtoMessage() {} func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[8] + mi := 
&file_supernode_status_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -663,7 +593,7 @@ type StatusResponse_P2PMetrics_DhtMetrics struct { func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics{} - mi := &file_supernode_status_proto_msgTypes[9] + mi := &file_supernode_status_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -675,7 +605,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[9] + mi := &file_supernode_status_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -688,7 +618,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Messa // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0} } func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { @@ -733,7 +663,7 @@ type StatusResponse_P2PMetrics_HandleCounters struct { func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { *x = StatusResponse_P2PMetrics_HandleCounters{} - mi := &file_supernode_status_proto_msgTypes[10] + mi := &file_supernode_status_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -745,7 +675,7 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[10] + mi := &file_supernode_status_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -758,7 +688,7 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.M // Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 1} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 1} } func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { @@ -805,7 +735,7 @@ type StatusResponse_P2PMetrics_BanEntry struct { func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { *x = StatusResponse_P2PMetrics_BanEntry{} - mi := &file_supernode_status_proto_msgTypes[11] + mi := &file_supernode_status_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -817,7 +747,7 @@ func (x *StatusResponse_P2PMetrics_BanEntry) String() string { func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[11] + mi := &file_supernode_status_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -830,7 +760,7 @@ func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message // Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 2} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 2} } func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { @@ -887,7 +817,7 @@ type StatusResponse_P2PMetrics_DatabaseStats struct { func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { *x = StatusResponse_P2PMetrics_DatabaseStats{} - mi := &file_supernode_status_proto_msgTypes[12] + mi := &file_supernode_status_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +829,7 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[12] + mi := &file_supernode_status_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -912,7 +842,7 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Me // Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 3} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 3} } func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { @@ -942,7 +872,7 @@ type StatusResponse_P2PMetrics_DiskStatus struct { func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { *x = StatusResponse_P2PMetrics_DiskStatus{} - mi := &file_supernode_status_proto_msgTypes[13] + mi := &file_supernode_status_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -954,7 +884,7 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[13] + mi := &file_supernode_status_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -967,7 +897,7 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Messa // Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 4} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 4} } func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { @@ -1004,7 +934,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_status_proto_msgTypes[16] + mi := &file_supernode_status_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1016,7 +946,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[16] + mi := &file_supernode_status_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,7 +959,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 0} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0, 0} } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { @@ -1075,7 +1005,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_status_proto_msgTypes[17] + mi := &file_supernode_status_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1087,7 +1017,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() strin func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[17] + mi := &file_supernode_status_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1100,7 +1030,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 1} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0, 1} } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { @@ -1154,7 +1084,7 @@ var file_supernode_status_proto_rawDesc = []byte{ 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x72, 0x69, 0x63, 0x73, 0x22, 0xca, 0x17, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, @@ -1163,202 +1093,191 @@ var file_supernode_status_proto_rawDesc = []byte{ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 
0x69, - 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, + 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, + 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, - 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, - 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, - 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, - 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, - 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, - 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, - 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, - 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, - 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 
0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, - 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, + 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, - 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, - 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 
0x63, 0x73, 0x2e, 0x43, 0x6f, - 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, - 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, + 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, + 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, + 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, + 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, + 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 
0x72, + 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, + 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, + 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, + 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, + 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, - 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, + 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, - 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x72, 
0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, - 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, - 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, - 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, - 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, - 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, - 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, - 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 
0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, - 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, - 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, - 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, - 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 
0x28, 0x03, 0x52, - 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, - 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, - 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, - 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, - 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, - 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, + 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, + 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, + 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, + 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, + 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, + 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, + 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, + 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, + 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, + 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, + 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, + 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, + 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 
0x02, 0x38, + 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -1373,49 +1292,47 @@ func file_supernode_status_proto_rawDescGZIP() []byte { return file_supernode_status_proto_rawDescData } -var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_supernode_status_proto_goTypes = []any{ (*StatusRequest)(nil), // 0: supernode.StatusRequest (*StatusResponse)(nil), // 1: supernode.StatusResponse (*StatusResponse_Resources)(nil), // 2: supernode.StatusResponse.Resources - (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks - (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network - (*StatusResponse_P2PMetrics)(nil), // 5: supernode.StatusResponse.P2PMetrics - (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage - (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics - (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters - (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry - (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats - (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus - nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + (*StatusResponse_Network)(nil), // 3: supernode.StatusResponse.Network + (*StatusResponse_P2PMetrics)(nil), // 4: supernode.StatusResponse.P2PMetrics + (*StatusResponse_Resources_CPU)(nil), // 5: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 6: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 7: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 9: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 10: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 11: supernode.StatusResponse.P2PMetrics.DatabaseStats + (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 12: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 13: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 14: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 15: 
supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint } var file_supernode_status_proto_depIdxs = []int32{ 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources - 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks - 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network - 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics - 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics - 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry - 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats - 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 3, // 1: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network + 4, // 2: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics + 5, // 3: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 6, // 4: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 7, // 5: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage + 8, // 6: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 13, // 7: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 14, // 8: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 10, // 9: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 11, // 10: 
supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 12, // 11: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 15, // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 9, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_supernode_status_proto_init() } @@ -1429,7 +1346,7 @@ func file_supernode_status_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_status_proto_rawDesc, NumEnums: 0, - NumMessages: 18, + NumMessages: 17, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/supernode/status.proto b/proto/supernode/status.proto index 7cafe908..c9edc5e7 100644 --- a/proto/supernode/status.proto +++ b/proto/supernode/status.proto @@ -42,13 +42,6 @@ message StatusResponse { string hardware_summary = 4; // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } - // ServiceTasks contains task information for a specific service - message ServiceTasks { - string service_name = 1; - repeated string task_ids = 2; - int32 task_count = 3; - } - // Network information message Network { int32 peers_count = 1; // Number of connected peers in P2P network @@ -56,7 +49,6 @@ message StatusResponse { } Resources resources = 3; - repeated ServiceTasks running_tasks = 4; // Services with currently running tasks repeated string registered_services = 5; // All registered/available services Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) @@ -127,7 +119,6 @@ message StatusResponse { DatabaseStats database = 5; DiskStatus disk = 6; } - + P2PMetrics p2p_metrics = 9; } - diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index 5bf650c1..b2e01e2d 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -3,31 +3,24 @@ package updater import ( "context" "fmt" - "io" "log" - "net/http" "os" "path/filepath" "strings" "time" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" - "google.golang.org/protobuf/encoding/protojson" ) // Global updater timing constants const ( - // gatewayTimeout bounds the local gateway status probe - gatewayTimeout = 15 * time.Second // updateCheckInterval is how often the periodic updater runs updateCheckInterval = 10 * time.Minute // forceUpdateAfter is the age threshold after a release is published - // beyond which updates are applied regardless of normal gates (idle, policy) + // beyond which 
updates are applied regardless of normal gates (policy only) forceUpdateAfter = 30 * time.Minute ) @@ -36,27 +29,19 @@ type AutoUpdater struct { homeDir string githubClient github.GithubClient versionMgr *version.Manager - gatewayURL string ticker *time.Ticker stopCh chan struct{} managerVersion string - // Gateway error backoff state - gwErrCount int - gwErrWindowStart time.Time } // Use protobuf JSON decoding for gateway responses (int64s encoded as strings) func New(homeDir string, cfg *config.Config, managerVersion string) *AutoUpdater { - // Use the correct gateway endpoint with imported constants - gatewayURL := fmt.Sprintf("http://localhost:%d/api/v1/status", gateway.DefaultGatewayPort) - return &AutoUpdater{ config: cfg, homeDir: homeDir, githubClient: github.NewClient(config.GitHubRepo), versionMgr: version.NewManager(homeDir), - gatewayURL: gatewayURL, stopCh: make(chan struct{}), managerVersion: managerVersion, } @@ -136,45 +121,6 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { // isGatewayIdle returns (idle, isError). When isError is true, // the gateway could not be reliably checked (network/error/invalid). // When isError is false and idle is false, the gateway is busy. -func (u *AutoUpdater) isGatewayIdle() (bool, bool) { - client := &http.Client{Timeout: gatewayTimeout} - - resp, err := client.Get(u.gatewayURL) - if err != nil { - log.Printf("Failed to check gateway status: %v", err) - // Error contacting gateway - return false, true - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - log.Printf("Gateway returned status %d, not safe to update", resp.StatusCode) - return false, true - } - - var status pb.StatusResponse - body, err := io.ReadAll(resp.Body) - if err != nil { - log.Printf("Failed to read gateway response: %v", err) - return false, true - } - if err := protojson.Unmarshal(body, &status); err != nil { - log.Printf("Failed to decode gateway response: %v", err) - return false, true - } - - totalTasks := 0 - for _, service := range status.RunningTasks { - totalTasks += int(service.TaskCount) - } - - if totalTasks > 0 { - log.Printf("Gateway busy: %d running tasks", totalTasks) - return false, false - } - - return true, false -} // checkAndUpdateCombined performs a single release check and, if needed, // downloads the release tarball once to update sn-manager and SuperNode. @@ -237,20 +183,6 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { return } - // Gate all updates (manager + SuperNode) on gateway idleness - // to avoid disrupting traffic during a self-update. - if !force { - if idle, isErr := u.isGatewayIdle(); !idle { - if isErr { - // Track errors and possibly request a clean SuperNode restart - u.handleGatewayError() - } else { - log.Println("Gateway busy, deferring updates") - } - return - } - } - // Download the combined release tarball once tarURL, err := u.githubClient.GetReleaseTarballURL(latest) if err != nil { @@ -363,43 +295,3 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { // handleGatewayError increments an error counter in a rolling 5-minute window // and when the threshold is reached, requests a clean SuperNode restart by // writing the standard restart marker consumed by the manager monitor. 
-func (u *AutoUpdater) handleGatewayError() { - const ( - window = 5 * time.Minute - retries = 3 // attempts within window before restart - ) - now := time.Now() - if u.gwErrWindowStart.IsZero() { - u.gwErrWindowStart = now - u.gwErrCount = 1 - log.Printf("Gateway check error (1/%d); starting 5m observation window", retries) - return - } - - elapsed := now.Sub(u.gwErrWindowStart) - if elapsed >= window { - // Window elapsed; decide based on accumulated errors - if u.gwErrCount >= retries { - marker := filepath.Join(u.homeDir, ".needs_restart") - if err := os.WriteFile(marker, []byte("gateway-error-recover"), 0644); err != nil { - log.Printf("Failed to write restart marker after gateway errors: %v", err) - } else { - log.Printf("Gateway errors persisted (%d/%d) over >=5m; requesting SuperNode restart to recover gateway", u.gwErrCount, retries) - } - } - // Start a new window beginning now, with this error as the first hit - u.gwErrWindowStart = now - u.gwErrCount = 1 - return - } - - // Still within the window; increment and possibly announce threshold reached - u.gwErrCount++ - if u.gwErrCount < retries { - log.Printf("Gateway check error (%d/%d) within 5m; will retry", u.gwErrCount, retries) - return - } - // Threshold reached but do not restart until full window elapses - remaining := window - elapsed - log.Printf("Gateway error threshold reached; waiting %s before requesting SuperNode restart", remaining.Truncate(time.Second)) -} diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 3d04f7a0..97569f2b 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -126,7 +126,6 @@ The supernode will connect to the Lumera network and begin participating in the // Create supernode status service statusService := supernodeService.NewSupernodeStatusService(*p2pService, lumeraClient, appConfig) - statusService.RegisterTaskProvider(cService) // Create supernode server supernodeServer := server.NewSupernodeServer(statusService) diff --git a/supernode/node/supernode/gateway/swagger.json b/supernode/node/supernode/gateway/swagger.json index af023816..8e8b66fe 100644 --- a/supernode/node/supernode/gateway/swagger.json +++ b/supernode/node/supernode/gateway/swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "supernode/supernode.proto", + "title": "supernode/service.proto", "version": "version not set" }, "tags": [ @@ -55,6 +55,15 @@ } } }, + "parameters": [ + { + "name": "includeP2pMetrics", + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", + "in": "query", + "required": false, + "type": "boolean" + } + ], "tags": [ "SupernodeService" ] @@ -169,25 +178,7 @@ }, "title": "System resource information" }, - "StatusResponseServiceTasks": { - "type": "object", - "properties": { - "serviceName": { - "type": "string" - }, - "taskIds": { - "type": "array", - "items": { - "type": "string" - } - }, - "taskCount": { - "type": "integer", - "format": "int32" - } - }, - "title": "ServiceTasks contains task information for a specific service" - }, + "protobufAny": { "type": "object", "properties": { @@ -261,14 +252,6 @@ "resources": { "$ref": "#/definitions/StatusResponseResources" }, - "runningTasks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/StatusResponseServiceTasks" - }, - "title": "Services with currently running tasks" - }, "registeredServices": { "type": "array", "items": { diff --git 
a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go index 8b061a3b..7e01410d 100644 --- a/supernode/node/supernode/server/status_server.go +++ b/supernode/node/supernode/server/status_server.go @@ -76,8 +76,7 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), HardwareSummary: status.Resources.HardwareSummary, }, - RunningTasks: make([]*pb.StatusResponse_ServiceTasks, 0, len(status.RunningTasks)), - RegisteredServices: status.RegisteredServices, + RegisteredServices: nil, Network: &pb.StatusResponse_Network{ PeersCount: status.Network.PeersCount, PeerAddresses: status.Network.PeerAddresses, @@ -98,14 +97,14 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) response.Resources.StorageVolumes = append(response.Resources.StorageVolumes, storageInfo) } - // Convert service tasks - for _, service := range status.RunningTasks { - serviceTask := &pb.StatusResponse_ServiceTasks{ - ServiceName: service.ServiceName, - TaskIds: service.TaskIDs, - TaskCount: service.TaskCount, + // Populate registered services from server registrations + if len(s.services) > 0 { + response.RegisteredServices = make([]string, 0, len(s.services)+1) + for _, svc := range s.services { + response.RegisteredServices = append(response.RegisteredServices, svc.Name) } - response.RunningTasks = append(response.RunningTasks, serviceTask) + // Also include health service + response.RegisteredServices = append(response.RegisteredServices, "grpc.health.v1.Health") } // Map optional P2P metrics @@ -174,8 +173,6 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) pbpm.Disk.UsedMb = pm.Disk.UsedMB pbpm.Disk.FreeMb = pm.Disk.FreeMB - // Detailed recent per-request lists removed from API - response.P2PMetrics = pbpm } diff --git a/supernode/node/supernode/server/status_server_test.go b/supernode/node/supernode/server/status_server_test.go index 7b2808d7..e4cb9edc 100644 --- a/supernode/node/supernode/server/status_server_test.go +++ b/supernode/node/supernode/server/status_server_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" ) @@ -30,12 +29,11 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.NotNil(t, resp.Resources) assert.NotNil(t, resp.Resources.Cpu) assert.NotNil(t, resp.Resources.Memory) - assert.NotNil(t, resp.RunningTasks) - assert.NotNil(t, resp.RegisteredServices) - + // Registered services populated from server registry + // Check version field assert.NotEmpty(t, resp.Version) - + // Check uptime field assert.True(t, resp.UptimeSeconds >= 0) @@ -48,7 +46,7 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGb > 0) assert.True(t, resp.Resources.Memory.UsagePercent >= 0) assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - + // Check hardware summary if resp.Resources.Cpu.Cores > 0 && resp.Resources.Memory.TotalGb > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -58,10 +56,9 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.NotEmpty(t, resp.Resources.StorageVolumes) assert.Equal(t, "/", resp.Resources.StorageVolumes[0].Path) - // Should have no services initially - assert.Empty(t, 
resp.RunningTasks) - assert.Empty(t, resp.RegisteredServices) - + // Should have no registered services initially (no services registered) + assert.Equal(t, 0, len(resp.RegisteredServices)) + // Check new fields have default values assert.NotNil(t, resp.Network) assert.Equal(t, int32(0), resp.Network.PeersCount) @@ -70,38 +67,7 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.Empty(t, resp.IpAddress) } -func TestSupernodeServer_GetStatusWithService(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Add a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2"}, - } - statusService.RegisterTaskProvider(mockProvider) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - // Check service details - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(2), service.TaskCount) - assert.Equal(t, []string{"task1", "task2"}, service.TaskIds) -} +// Removed: task tracking tests; status no longer returns running tasks func TestSupernodeServer_Desc(t *testing.T) { statusService := supernode.NewSupernodeStatusService(nil, nil, nil) diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go index f88c284b..3619d7d1 100644 --- a/supernode/services/cascade/service.go +++ b/supernode/services/cascade/service.go @@ -9,7 +9,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" ) type CascadeService struct { @@ -22,7 +21,6 @@ type CascadeService struct { } // Compile-time checks to ensure CascadeService implements required interfaces -var _ supernode.TaskProvider = (*CascadeService)(nil) var _ CascadeServiceFactory = (*CascadeService)(nil) // NewCascadeRegistrationTask creates a new task for cascade registration @@ -37,30 +35,13 @@ func (service *CascadeService) Run(ctx context.Context) error { return service.RunHelper(ctx, service.config.SupernodeAccountAddress, logPrefix) } -// GetServiceName returns the name of the cascade service -func (service *CascadeService) GetServiceName() string { - return "cascade" -} - -// GetRunningTasks returns a list of currently running task IDs -func (service *CascadeService) GetRunningTasks() []string { - var taskIDs []string - for _, t := range service.Worker.Tasks() { - // Include only tasks that are not in a final state - if st := t.Status(); st != nil && st.SubStatus != nil && !st.SubStatus.IsFinal() { - taskIDs = append(taskIDs, t.ID()) - } - } - return taskIDs -} - // NewCascadeService returns a new CascadeService instance func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: 
adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } + return &CascadeService{ + config: config, + SuperNodeService: base.NewSuperNodeService(p2pClient), + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + } } diff --git a/supernode/services/cascade/status.go b/supernode/services/cascade/status.go index b5633a45..64772443 100644 --- a/supernode/services/cascade/status.go +++ b/supernode/services/cascade/status.go @@ -11,12 +11,9 @@ type StatusResponse = supernode.StatusResponse // GetStatus delegates to the common supernode status service func (service *CascadeService) GetStatus(ctx context.Context) (StatusResponse, error) { - // Create a status service and register the cascade service as a task provider + // Create a status service // Pass nil for optional dependencies (P2P, lumera client, and config) // as cascade service doesn't have access to them in this context statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - statusService.RegisterTaskProvider(service) - - // Get the status from the common service return statusService.GetStatus(ctx, false) } diff --git a/supernode/services/cascade/status_test.go b/supernode/services/cascade/status_test.go index d85f9f8f..1a15c694 100644 --- a/supernode/services/cascade/status_test.go +++ b/supernode/services/cascade/status_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" "github.com/stretchr/testify/assert" ) @@ -13,29 +12,13 @@ func TestGetStatus(t *testing.T) { ctx := context.Background() tests := []struct { - name string - taskCount int - expectErr bool - expectTasks int + name string + taskCount int + expectErr bool }{ - { - name: "no tasks", - taskCount: 0, - expectErr: false, - expectTasks: 0, - }, - { - name: "one task", - taskCount: 1, - expectErr: false, - expectTasks: 1, - }, - { - name: "multiple tasks", - taskCount: 3, - expectErr: false, - expectTasks: 3, - }, + {name: "no tasks", taskCount: 0, expectErr: false}, + {name: "one task", taskCount: 1, expectErr: false}, + {name: "multiple tasks", taskCount: 3, expectErr: false}, } for _, tt := range tests { @@ -66,7 +49,7 @@ func TestGetStatus(t *testing.T) { // Version check assert.NotEmpty(t, resp.Version) - + // Uptime check assert.True(t, resp.UptimeSeconds >= 0) @@ -79,7 +62,7 @@ func TestGetStatus(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGB > 0) assert.True(t, resp.Resources.Memory.UsedGB <= resp.Resources.Memory.TotalGB) assert.True(t, resp.Resources.Memory.UsagePercent >= 0 && resp.Resources.Memory.UsagePercent <= 100) - + // Hardware summary check if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -89,34 +72,14 @@ func TestGetStatus(t *testing.T) { assert.NotEmpty(t, resp.Resources.Storage) assert.Equal(t, "/", resp.Resources.Storage[0].Path) - // Registered services check - assert.Contains(t, resp.RegisteredServices, "cascade") - + // Registered services is populated at server layer; cascade service returns none + assert.Empty(t, resp.RegisteredServices) + // Check new fields have default values (since service doesn't have access to P2P/lumera/config) assert.Equal(t, int32(0), resp.Network.PeersCount) assert.Empty(t, resp.Network.PeerAddresses) assert.Equal(t, int32(0), resp.Rank) assert.Empty(t, 
resp.IPAddress) - - // Task count check - look for cascade service in the running tasks list - var cascadeService *supernode.ServiceTasks - for _, service := range resp.RunningTasks { - if service.ServiceName == "cascade" { - cascadeService = &service - break - } - } - - if tt.expectTasks > 0 { - assert.NotNil(t, cascadeService, "cascade service should be present") - assert.Equal(t, tt.expectTasks, int(cascadeService.TaskCount)) - assert.Equal(t, tt.expectTasks, len(cascadeService.TaskIDs)) - } else { - // If no tasks expected, either no cascade service or empty task count - if cascadeService != nil { - assert.Equal(t, 0, int(cascadeService.TaskCount)) - } - } }) } } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index 1d0b9dd0..1e707e03 100644 --- a/supernode/services/common/supernode/service.go +++ b/supernode/services/common/supernode/service.go @@ -19,34 +19,26 @@ var Version = "dev" // SupernodeStatusService provides centralized status information // by collecting system metrics and aggregating task information from registered services type SupernodeStatusService struct { - taskProviders []TaskProvider // List of registered services that provide task information - metrics *MetricsCollector // System metrics collector for CPU and memory stats - storagePaths []string // Paths to monitor for storage metrics - startTime time.Time // Service start time for uptime calculation - p2pService p2p.Client // P2P service for network information - lumeraClient lumera.Client // Lumera client for blockchain queries - config *config.Config // Supernode configuration + metrics *MetricsCollector // System metrics collector for CPU and memory stats + storagePaths []string // Paths to monitor for storage metrics + startTime time.Time // Service start time for uptime calculation + p2pService p2p.Client // P2P service for network information + lumeraClient lumera.Client // Lumera client for blockchain queries + config *config.Config // Supernode configuration } // NewSupernodeStatusService creates a new supernode status service instance func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { return &SupernodeStatusService{ - taskProviders: make([]TaskProvider, 0), - metrics: NewMetricsCollector(), - storagePaths: []string{"/"}, // Default to monitoring root filesystem - startTime: time.Now(), - p2pService: p2pService, - lumeraClient: lumeraClient, - config: cfg, + metrics: NewMetricsCollector(), + storagePaths: []string{"/"}, // Default to monitoring root filesystem + startTime: time.Now(), + p2pService: p2pService, + lumeraClient: lumeraClient, + config: cfg, } } -// RegisterTaskProvider registers a service as a task provider -// This allows the service to report its running tasks in status responses -func (s *SupernodeStatusService) RegisterTaskProvider(provider TaskProvider) { - s.taskProviders = append(s.taskProviders, provider) -} - // GetStatus returns the current system status including all registered services // This method collects CPU metrics, memory usage, and task information from all providers func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (StatusResponse, error) { @@ -99,25 +91,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric // Collect storage metrics resp.Resources.Storage = s.metrics.CollectStorageMetrics(ctx, s.storagePaths) - // Collect service information from all registered 
providers - resp.RunningTasks = make([]ServiceTasks, 0, len(s.taskProviders)) - resp.RegisteredServices = make([]string, 0, len(s.taskProviders)) - - for _, provider := range s.taskProviders { - serviceName := provider.GetServiceName() - tasks := provider.GetRunningTasks() - - // Add to registered services list - resp.RegisteredServices = append(resp.RegisteredServices, serviceName) - - // Add all services to running tasks (even with 0 tasks) - serviceTask := ServiceTasks{ - ServiceName: serviceName, - TaskIDs: tasks, - TaskCount: int32(len(tasks)), - } - resp.RunningTasks = append(resp.RunningTasks, serviceTask) - } + // Task tracking removed; RegisteredServices populated at server layer // Initialize network info resp.Network = NetworkInfo{ @@ -218,7 +192,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric } } - // Detailed recent per-request lists removed from API mapping + // Detailed recent per-request lists removed from API mapping } // DHT rolling metrics snapshot is attached at top-level under dht_metrics @@ -284,11 +258,5 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric } - // Log summary statistics - totalTasks := 0 - for _, service := range resp.RunningTasks { - totalTasks += int(service.TaskCount) - } - return resp, nil } diff --git a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go index e2f82287..2a499156 100644 --- a/supernode/services/common/supernode/service_test.go +++ b/supernode/services/common/supernode/service_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" "github.com/stretchr/testify/assert" ) @@ -14,12 +13,12 @@ func TestSupernodeStatusService(t *testing.T) { t.Run("empty service", func(t *testing.T) { statusService := NewSupernodeStatusService(nil, nil, nil) - resp, err := statusService.GetStatus(ctx, false) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) - + // Should have version info assert.NotEmpty(t, resp.Version) - + // Should have uptime assert.True(t, resp.UptimeSeconds >= 0) @@ -30,7 +29,7 @@ func TestSupernodeStatusService(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGB > 0) assert.True(t, resp.Resources.Memory.UsagePercent >= 0) assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - + // Should have hardware summary if cores and memory are available if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -40,104 +39,13 @@ func TestSupernodeStatusService(t *testing.T) { assert.NotEmpty(t, resp.Resources.Storage) assert.Equal(t, "/", resp.Resources.Storage[0].Path) - // Should have empty services list - assert.Empty(t, resp.RunningTasks) + // Registered services now populated at server layer; status service leaves empty assert.Empty(t, resp.RegisteredServices) - + // Should have default values for new fields assert.Equal(t, int32(0), resp.Network.PeersCount) assert.Empty(t, resp.Network.PeerAddresses) assert.Equal(t, int32(0), resp.Rank) assert.Empty(t, resp.IPAddress) }) - - t.Run("single service with tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2", "task3"}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, 
false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(3), service.TaskCount) - assert.Equal(t, []string{"task1", "task2", "task3"}, service.TaskIDs) - }) - - t.Run("multiple services", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register multiple mock task providers - cascadeProvider := &common.MockTaskProvider{ - ServiceName: "cascade", - TaskIDs: []string{"cascade1", "cascade2"}, - } - senseProvider := &common.MockTaskProvider{ - ServiceName: "sense", - TaskIDs: []string{"sense1"}, - } - - statusService.RegisterTaskProvider(cascadeProvider) - statusService.RegisterTaskProvider(senseProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have two services - assert.Len(t, resp.RunningTasks, 2) - assert.Len(t, resp.RegisteredServices, 2) - assert.Contains(t, resp.RegisteredServices, "cascade") - assert.Contains(t, resp.RegisteredServices, "sense") - - // Check services are present - serviceMap := make(map[string]ServiceTasks) - for _, service := range resp.RunningTasks { - serviceMap[service.ServiceName] = service - } - - cascade, ok := serviceMap["cascade"] - assert.True(t, ok) - assert.Equal(t, int32(2), cascade.TaskCount) - assert.Equal(t, []string{"cascade1", "cascade2"}, cascade.TaskIDs) - - sense, ok := serviceMap["sense"] - assert.True(t, ok) - assert.Equal(t, int32(1), sense.TaskCount) - assert.Equal(t, []string{"sense1"}, sense.TaskIDs) - }) - - t.Run("service with no tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register a mock task provider with no tasks - mockProvider := &common.MockTaskProvider{ - ServiceName: "empty-service", - TaskIDs: []string{}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"empty-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "empty-service", service.ServiceName) - assert.Equal(t, int32(0), service.TaskCount) - assert.Empty(t, service.TaskIDs) - }) } diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go index e84b954a..39579502 100644 --- a/supernode/services/common/supernode/types.go +++ b/supernode/services/common/supernode/types.go @@ -3,15 +3,14 @@ package supernode // StatusResponse represents the complete system status information // with clear organization of resources and services type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RunningTasks []ServiceTasks // Services with currently running tasks - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics P2PMetrics // Detailed P2P metrics snapshot + Version string // Supernode version + UptimeSeconds uint64 // Uptime in seconds + Resources 
Resources // System resource information + RegisteredServices []string // All registered/available services + Network NetworkInfo // P2P network information + Rank int32 // Rank in the top supernodes list (0 if not in top list) + IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics P2PMetrics // Detailed P2P metrics snapshot } // Resources contains system resource metrics @@ -45,13 +44,6 @@ type StorageInfo struct { UsagePercent float64 // Storage usage percentage (0-100) } -// ServiceTasks contains task information for a specific service -type ServiceTasks struct { - ServiceName string // Name of the service (e.g., "cascade") - TaskIDs []string // List of currently running task IDs - TaskCount int32 // Total number of running tasks -} - // NetworkInfo contains P2P network information type NetworkInfo struct { PeersCount int32 // Number of connected peers in P2P network @@ -60,12 +52,12 @@ type NetworkInfo struct { // P2PMetrics mirrors the proto P2P metrics for status API type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus + DhtMetrics DhtMetrics + NetworkHandleMetrics map[string]HandleCounters + ConnPoolMetrics map[string]int64 + BanList []BanEntry + Database DatabaseStats + Disk DiskStatus } type StoreSuccessPoint struct { @@ -117,15 +109,3 @@ type DiskStatus struct { UsedMB float64 FreeMB float64 } - -// Removed: recent per-request lists from public API - -// TaskProvider interface defines the contract for services to provide -// their running task information to the status service -type TaskProvider interface { - // GetServiceName returns the unique name identifier for this service - GetServiceName() string - - // GetRunningTasks returns a list of currently active task IDs - GetRunningTasks() []string -} diff --git a/supernode/services/common/test_helpers.go b/supernode/services/common/test_helpers.go deleted file mode 100644 index c49b940a..00000000 --- a/supernode/services/common/test_helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -// MockTaskProvider for testing (exported for use in other packages) -type MockTaskProvider struct { - ServiceName string - TaskIDs []string -} - -func (m *MockTaskProvider) GetServiceName() string { - return m.ServiceName -} - -func (m *MockTaskProvider) GetRunningTasks() []string { - return m.TaskIDs -} From 233d074186bcada90bcb3918913d997dbb0c32b5 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 02:30:05 +0500 Subject: [PATCH 02/36] Consolidate p2p logic in adapter --- pkg/raptorq/helper.go | 49 ----- supernode/services/cascade/adaptors/p2p.go | 17 +- supernode/services/cascade/task.go | 18 +- supernode/services/common/storage/handler.go | 180 ------------------ .../services/common/storage/handler_test.go | 57 ------ 5 files changed, 19 insertions(+), 302 deletions(-) delete mode 100644 pkg/raptorq/helper.go delete mode 100644 supernode/services/common/storage/handler.go delete mode 100644 supernode/services/common/storage/handler_test.go diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go deleted file mode 100644 index ea36b1ab..00000000 --- a/pkg/raptorq/helper.go +++ /dev/null @@ -1,49 +0,0 @@ -package raptorq - -import ( - "bytes" - "context" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - "strconv" -) - -const ( - 
InputEncodeFileName = "input.data" - SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' -) - -// GetIDFiles generates ID Files for dd_and_fingerprints files and rq_id files -// file is b64 encoded file appended with signatures and compressed, ic is the initial counter -// and max is the number of ids to generate -func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { - idFiles := make([][]byte, 0, max) - ids = make([]string, 0, max) - var buffer bytes.Buffer - - for i := uint32(0); i < max; i++ { - buffer.Reset() - counter := ic + i - - buffer.Write(file) - buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility - - compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } - - idFiles = append(idFiles, compressedData) - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) - } - - ids = append(ids, base58.Encode(hash)) - } - - return ids, idFiles, nil -} diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index d1fd6ab9..93fed82a 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -15,7 +15,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" "github.com/pkg/errors" ) @@ -27,6 +26,10 @@ const ( storeBatchContextTimeout = 3 * time.Minute ) +// Local P2P data type identifier used when storing via P2P. +// Value must remain stable to preserve DB semantics. +const P2PDataRaptorQSymbol = 1 + // P2PService defines the interface for storing data in the P2P layer. // //go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go @@ -38,8 +41,8 @@ type P2PService interface { // p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { - p2p p2p.Client - rqStore rqstore.Store + p2p p2p.Client + rqStore rqstore.Store } // NewP2PService returns a concrete implementation of P2PService. @@ -157,7 +160,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action // Send as the same data type you use for symbols logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) + err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID) cancel() if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) @@ -217,6 +220,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } +// walkSymbolTree returns relative file keys for symbols under `root`, +// skipping JSON layout files. 
func walkSymbolTree(root string) ([]string, error) { var keys []string err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { @@ -224,7 +229,7 @@ func walkSymbolTree(root string) ([]string, error) { return err // propagate I/O errors } if d.IsDir() { - return nil // skip directory nodes + return nil // skip directories } // ignore layout json if present if strings.EqualFold(filepath.Ext(d.Name()), ".json") { @@ -257,7 +262,7 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi defer cancel() logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) - if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { + if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) diff --git a/supernode/services/cascade/task.go b/supernode/services/cascade/task.go index 5dcffa34..43eb1181 100644 --- a/supernode/services/cascade/task.go +++ b/supernode/services/cascade/task.go @@ -1,23 +1,21 @@ package cascade import ( - "context" + "context" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" + "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" ) // CascadeRegistrationTask is the task for cascade registration type CascadeRegistrationTask struct { - *CascadeService + *CascadeService - *base.SuperNodeTask - storage *storage.StorageHandler + *base.SuperNodeTask - Asset *files.File - dataHash string - creatorSignature []byte + Asset *files.File + dataHash string + creatorSignature []byte } const ( diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go deleted file mode 100644 index 9e570d03..00000000 --- a/supernode/services/common/storage/handler.go +++ /dev/null @@ -1,180 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "io/fs" - "math" - "math/rand/v2" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" -) - -const ( - loadSymbolsBatchSize = 2500 - storeSymbolsPercent = 10 - concurrency = 1 - - UnknownDataType = iota // 1 - P2PDataRaptorQSymbol // 1 - P2PDataCascadeMetadata // 2 -) - -// StorageHandler provides common logic for RQ and P2P operations -type StorageHandler struct { - P2PClient p2p.Client - rqDir string - - TaskID string - TxID string - - store rqstore.Store - semaphore chan struct{} -} - -// NewStorageHandler creates instance of StorageHandler -func NewStorageHandler(p2p p2p.Client, rqDir string, store rqstore.Store) *StorageHandler { - return &StorageHandler{ - P2PClient: p2p, - rqDir: rqDir, - store: store, - semaphore: make(chan struct{}, concurrency), - } -} - -// StoreFileIntoP2P stores file into P2P -func (h *StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) { 
- data, err := file.Bytes() - if err != nil { - return "", errors.Errorf("store file %s into p2p", file.Name()) - } - return h.StoreBytesIntoP2P(ctx, data, typ) -} - -// StoreBytesIntoP2P into P2P actual data -func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) { - return h.P2PClient.Store(ctx, data, typ) -} - -// StoreBatch stores into P2P an array of byte slices. -func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { - val := ctx.Value(logtrace.CorrelationIDKey) - taskID := "" - if val != nil { - taskID = fmt.Sprintf("%v", val) - } - - logtrace.Debug(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) - return h.P2PClient.StoreBatch(ctx, list, typ, taskID) -} - -// StoreRaptorQSymbolsIntoP2P stores RaptorQ symbols into P2P -// It first records the directory in the database, then gathers all symbol paths -// under the specified directory. If the number of keys exceeds a certain threshold, -// it randomly samples a percentage of them. Finally, it streams the symbols in -// fixed-size batches to the P2P network. -// -// Note: P2P client returns (ratePct, requests, err) for each batch; we ignore -// the metrics here and only validate error semantics. -func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID, symbolsDir string) error { - /* record directory in DB */ - if err := h.store.StoreSymbolDirectory(taskID, symbolsDir); err != nil { - return fmt.Errorf("store symbol dir: %w", err) - } - - /* gather every symbol path under symbolsDir ------------------------- */ - keys, err := walkSymbolTree(symbolsDir) - if err != nil { - return err - } - - /* down-sample if we exceed the "big directory" threshold ------------- */ - if len(keys) > loadSymbolsBatchSize { - want := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) - if want < len(keys) { - rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) - keys = keys[:want] - } - sort.Strings(keys) // deterministic order inside the sample - } - - logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := h.storeSymbolsInP2P(ctx, taskID, symbolsDir, keys[start:end]); err != nil { - return err - } - start = end - } - - if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil { - return fmt.Errorf("update first-batch flag: %w", err) - } - - logtrace.Debug(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)}) - - return nil -} - -func walkSymbolTree(root string) ([]string, error) { - var keys []string - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err // propagate I/O errors - } - if d.IsDir() { - return nil // skip directory nodes - } - // ignore layout json if present - if strings.EqualFold(filepath.Ext(d.Name()), ".json") { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - keys = append(keys, rel) // store as "block_0/filename" - return nil - }) - if err != nil { - return nil, fmt.Errorf("walk symbol tree: %w", err) - } - return keys, nil -} - -func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) error { - logtrace.Debug(ctx, "loading 
batch symbols", logtrace.Fields{"count": len(fileKeys)}) - - symbols, err := utils.LoadSymbols(root, fileKeys) - if err != nil { - return fmt.Errorf("load symbols: %w", err) - } - - if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } - - logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) - - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return fmt.Errorf("delete symbols: %w", err) - } - - logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - - return nil -} diff --git a/supernode/services/common/storage/handler_test.go b/supernode/services/common/storage/handler_test.go deleted file mode 100644 index fd4e0d8e..00000000 --- a/supernode/services/common/storage/handler_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package storage - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/p2p/mocks" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// --- Mocks --- - -type mockP2PClient struct { - mocks.Client -} - -type mockStore struct { - mock.Mock -} - -func (m *mockStore) StoreSymbolDirectory(taskID, dir string) error { - args := m.Called(taskID, dir) - return args.Error(0) -} - -func (m *mockStore) UpdateIsFirstBatchStored(txID string) error { - args := m.Called(txID) - return args.Error(0) -} - -func TestStoreBytesIntoP2P(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - data := []byte("hello") - p2pClient.On("Store", mock.Anything, data, 1).Return("some-id", nil) - - id, err := handler.StoreBytesIntoP2P(context.Background(), data, 1) - assert.NoError(t, err) - assert.Equal(t, "some-id", id) - p2pClient.AssertExpectations(t) -} - -func TestStoreBatch(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - ctx := context.WithValue(context.Background(), "task_id", "123") - list := [][]byte{[]byte("a"), []byte("b")} - // StoreBatch now returns error only - p2pClient.On("StoreBatch", mock.Anything, list, 3, "").Return(nil) - - err := handler.StoreBatch(ctx, list, 3) - assert.NoError(t, err) -} From d115fe6440c66dd92fa28e54d1db056d13e95fcf Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 02:40:25 +0500 Subject: [PATCH 03/36] Cleanup --- gen/dupedetection/dd-server.pb.go | 1263 ------------------------ gen/dupedetection/dd-server_grpc.pb.go | 150 --- gen/raptorq/raptorq.pb.go | 476 --------- gen/raptorq/raptorq_grpc.pb.go | 187 ---- gen/supernode/agents/.gitkeep | 0 pkg/dd/client.go | 46 - pkg/dd/config.go | 50 - pkg/dd/connection.go | 23 - pkg/dd/dd_mock.go | 162 --- pkg/dd/dd_server_client.go | 24 - pkg/dd/image_rareness.go | 108 -- pkg/dd/interfaces.go | 30 - pkg/dd/status.go | 44 - proto/dupedetection/dd-server.proto | 108 -- proto/raptorq/raptorq.proto | 46 - 15 files changed, 2717 deletions(-) delete mode 100644 gen/dupedetection/dd-server.pb.go delete mode 100644 gen/dupedetection/dd-server_grpc.pb.go delete mode 100644 gen/raptorq/raptorq.pb.go delete mode 100644 gen/raptorq/raptorq_grpc.pb.go delete mode 100644 gen/supernode/agents/.gitkeep delete mode 100644 pkg/dd/client.go delete mode 100644 pkg/dd/config.go delete mode 100644 pkg/dd/connection.go delete mode 100644 pkg/dd/dd_mock.go delete mode 100644 pkg/dd/dd_server_client.go delete mode 100644 pkg/dd/image_rareness.go delete mode 100644 pkg/dd/interfaces.go delete mode 100644 
pkg/dd/status.go delete mode 100644 proto/dupedetection/dd-server.proto delete mode 100644 proto/raptorq/raptorq.proto diff --git a/gen/dupedetection/dd-server.pb.go b/gen/dupedetection/dd-server.pb.go deleted file mode 100644 index 69e63a6d..00000000 --- a/gen/dupedetection/dd-server.pb.go +++ /dev/null @@ -1,1263 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RarenessScoreRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ImageFilepath string `protobuf:"bytes,1,opt,name=image_filepath,json=imageFilepath,proto3" json:"image_filepath,omitempty"` - PastelBlockHashWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,4,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,5,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - PastelIdOfRegisteringSupernode_2 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,8,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,9,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,10,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - CollectionNameString string `protobuf:"bytes,11,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` -} - -func (x *RarenessScoreRequest) Reset() { - *x = RarenessScoreRequest{} - mi := &file_dd_server_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RarenessScoreRequest) String() 
string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RarenessScoreRequest) ProtoMessage() {} - -func (x *RarenessScoreRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RarenessScoreRequest.ProtoReflect.Descriptor instead. -func (*RarenessScoreRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{0} -} - -func (x *RarenessScoreRequest) GetImageFilepath() string { - if x != nil { - return x.ImageFilepath - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *RarenessScoreRequest) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *RarenessScoreRequest) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *RarenessScoreRequest) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -type ImageRarenessScoreReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PastelBlockHashWhenRequestSubmitted string `protobuf:"bytes,1,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,4,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,5,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - PastelIdOfRegisteringSupernode_2 string 
`protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,8,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - ImageFilePath string `protobuf:"bytes,9,opt,name=image_file_path,json=imageFilePath,proto3" json:"image_file_path,omitempty"` - DupeDetectionSystemVersion string `protobuf:"bytes,10,opt,name=dupe_detection_system_version,json=dupeDetectionSystemVersion,proto3" json:"dupe_detection_system_version,omitempty"` - IsLikelyDupe bool `protobuf:"varint,11,opt,name=is_likely_dupe,json=isLikelyDupe,proto3" json:"is_likely_dupe,omitempty"` - IsRareOnInternet bool `protobuf:"varint,12,opt,name=is_rare_on_internet,json=isRareOnInternet,proto3" json:"is_rare_on_internet,omitempty"` - OverallRarenessScore float32 `protobuf:"fixed32,13,opt,name=overall_rareness_score,json=overallRarenessScore,proto3" json:"overall_rareness_score,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 `protobuf:"fixed32,14,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_25pct,json=pctOfTop10MostSimilarWithDupeProbAbove25pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_25pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 `protobuf:"fixed32,15,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_33pct,json=pctOfTop10MostSimilarWithDupeProbAbove33pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_33pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 `protobuf:"fixed32,16,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_50pct,json=pctOfTop10MostSimilarWithDupeProbAbove50pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_50pct,omitempty"` - RarenessScoresTableJsonCompressedB64 string `protobuf:"bytes,17,opt,name=rareness_scores_table_json_compressed_b64,json=rarenessScoresTableJsonCompressedB64,proto3" json:"rareness_scores_table_json_compressed_b64,omitempty"` - InternetRareness *InternetRareness `protobuf:"bytes,18,opt,name=internet_rareness,json=internetRareness,proto3" json:"internet_rareness,omitempty"` - OpenNsfwScore float32 `protobuf:"fixed32,19,opt,name=open_nsfw_score,json=openNsfwScore,proto3" json:"open_nsfw_score,omitempty"` - AlternativeNsfwScores *AltNsfwScores `protobuf:"bytes,20,opt,name=alternative_nsfw_scores,json=alternativeNsfwScores,proto3" json:"alternative_nsfw_scores,omitempty"` - ImageFingerprintOfCandidateImageFile []float64 `protobuf:"fixed64,21,rep,packed,name=image_fingerprint_of_candidate_image_file,json=imageFingerprintOfCandidateImageFile,proto3" json:"image_fingerprint_of_candidate_image_file,omitempty"` - CollectionNameString string `protobuf:"bytes,22,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` - HashOfCandidateImageFile string `protobuf:"bytes,23,opt,name=hash_of_candidate_image_file,json=hashOfCandidateImageFile,proto3" json:"hash_of_candidate_image_file,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,24,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - GroupRarenessScore float32 
`protobuf:"fixed32,25,opt,name=group_rareness_score,json=groupRarenessScore,proto3" json:"group_rareness_score,omitempty"` - CandidateImageThumbnailWebpAsBase64String string `protobuf:"bytes,26,opt,name=candidate_image_thumbnail_webp_as_base64_string,json=candidateImageThumbnailWebpAsBase64String,proto3" json:"candidate_image_thumbnail_webp_as_base64_string,omitempty"` - DoesNotImpactTheFollowingCollectionStrings string `protobuf:"bytes,27,opt,name=does_not_impact_the_following_collection_strings,json=doesNotImpactTheFollowingCollectionStrings,proto3" json:"does_not_impact_the_following_collection_strings,omitempty"` - IsInvalidSenseRequest bool `protobuf:"varint,28,opt,name=is_invalid_sense_request,json=isInvalidSenseRequest,proto3" json:"is_invalid_sense_request,omitempty"` - InvalidSenseRequestReason string `protobuf:"bytes,29,opt,name=invalid_sense_request_reason,json=invalidSenseRequestReason,proto3" json:"invalid_sense_request_reason,omitempty"` - SimilarityScoreToFirstEntryInCollection float32 `protobuf:"fixed32,30,opt,name=similarity_score_to_first_entry_in_collection,json=similarityScoreToFirstEntryInCollection,proto3" json:"similarity_score_to_first_entry_in_collection,omitempty"` - CpProbability float32 `protobuf:"fixed32,31,opt,name=cp_probability,json=cpProbability,proto3" json:"cp_probability,omitempty"` - ChildProbability float32 `protobuf:"fixed32,32,opt,name=child_probability,json=childProbability,proto3" json:"child_probability,omitempty"` - ImageFingerprintSetChecksum string `protobuf:"bytes,33,opt,name=image_fingerprint_set_checksum,json=imageFingerprintSetChecksum,proto3" json:"image_fingerprint_set_checksum,omitempty"` -} - -func (x *ImageRarenessScoreReply) Reset() { - *x = ImageRarenessScoreReply{} - mi := &file_dd_server_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ImageRarenessScoreReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ImageRarenessScoreReply) ProtoMessage() {} - -func (x *ImageRarenessScoreReply) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ImageRarenessScoreReply.ProtoReflect.Descriptor instead. 
-func (*ImageRarenessScoreReply) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{1} -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetImageFilePath() string { - if x != nil { - return x.ImageFilePath - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDupeDetectionSystemVersion() string { - if x != nil { - return x.DupeDetectionSystemVersion - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsLikelyDupe() bool { - if x != nil { - return x.IsLikelyDupe - } - return false -} - -func (x *ImageRarenessScoreReply) GetIsRareOnInternet() bool { - if x != nil { - return x.IsRareOnInternet - } - return false -} - -func (x *ImageRarenessScoreReply) GetOverallRarenessScore() float32 { - if x != nil { - return x.OverallRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_25Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_33Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_50Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetRarenessScoresTableJsonCompressedB64() string { - if x != nil { - return x.RarenessScoresTableJsonCompressedB64 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetInternetRareness() *InternetRareness { - if x != nil { - return x.InternetRareness - } - return nil -} - -func (x *ImageRarenessScoreReply) GetOpenNsfwScore() float32 { - if x != nil { - return x.OpenNsfwScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetAlternativeNsfwScores() *AltNsfwScores { - if x != nil { - return x.AlternativeNsfwScores - } - return nil -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintOfCandidateImageFile() []float64 { - if x != nil { - return x.ImageFingerprintOfCandidateImageFile - } - return nil -} - -func (x *ImageRarenessScoreReply) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -func (x 
*ImageRarenessScoreReply) GetHashOfCandidateImageFile() string { - if x != nil { - return x.HashOfCandidateImageFile - } - return "" -} - -func (x *ImageRarenessScoreReply) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *ImageRarenessScoreReply) GetGroupRarenessScore() float32 { - if x != nil { - return x.GroupRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetCandidateImageThumbnailWebpAsBase64String() string { - if x != nil { - return x.CandidateImageThumbnailWebpAsBase64String - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDoesNotImpactTheFollowingCollectionStrings() string { - if x != nil { - return x.DoesNotImpactTheFollowingCollectionStrings - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsInvalidSenseRequest() bool { - if x != nil { - return x.IsInvalidSenseRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetInvalidSenseRequestReason() string { - if x != nil { - return x.InvalidSenseRequestReason - } - return "" -} - -func (x *ImageRarenessScoreReply) GetSimilarityScoreToFirstEntryInCollection() float32 { - if x != nil { - return x.SimilarityScoreToFirstEntryInCollection - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetCpProbability() float32 { - if x != nil { - return x.CpProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetChildProbability() float32 { - if x != nil { - return x.ChildProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintSetChecksum() string { - if x != nil { - return x.ImageFingerprintSetChecksum - } - return "" -} - -type InternetRareness struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RareOnInternetSummaryTableAsJsonCompressedB64 string `protobuf:"bytes,1,opt,name=rare_on_internet_summary_table_as_json_compressed_b64,json=rareOnInternetSummaryTableAsJsonCompressedB64,proto3" json:"rare_on_internet_summary_table_as_json_compressed_b64,omitempty"` - RareOnInternetGraphJsonCompressedB64 string `protobuf:"bytes,2,opt,name=rare_on_internet_graph_json_compressed_b64,json=rareOnInternetGraphJsonCompressedB64,proto3" json:"rare_on_internet_graph_json_compressed_b64,omitempty"` - AlternativeRareOnInternetDictAsJsonCompressedB64 string `protobuf:"bytes,3,opt,name=alternative_rare_on_internet_dict_as_json_compressed_b64,json=alternativeRareOnInternetDictAsJsonCompressedB64,proto3" json:"alternative_rare_on_internet_dict_as_json_compressed_b64,omitempty"` - MinNumberOfExactMatchesInPage uint32 `protobuf:"varint,4,opt,name=min_number_of_exact_matches_in_page,json=minNumberOfExactMatchesInPage,proto3" json:"min_number_of_exact_matches_in_page,omitempty"` - EarliestAvailableDateOfInternetResults string `protobuf:"bytes,5,opt,name=earliest_available_date_of_internet_results,json=earliestAvailableDateOfInternetResults,proto3" json:"earliest_available_date_of_internet_results,omitempty"` -} - -func (x *InternetRareness) Reset() { - *x = InternetRareness{} - mi := &file_dd_server_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *InternetRareness) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InternetRareness) ProtoMessage() {} - -func (x *InternetRareness) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - 
ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InternetRareness.ProtoReflect.Descriptor instead. -func (*InternetRareness) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{2} -} - -func (x *InternetRareness) GetRareOnInternetSummaryTableAsJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetSummaryTableAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetRareOnInternetGraphJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetGraphJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetAlternativeRareOnInternetDictAsJsonCompressedB64() string { - if x != nil { - return x.AlternativeRareOnInternetDictAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetMinNumberOfExactMatchesInPage() uint32 { - if x != nil { - return x.MinNumberOfExactMatchesInPage - } - return 0 -} - -func (x *InternetRareness) GetEarliestAvailableDateOfInternetResults() string { - if x != nil { - return x.EarliestAvailableDateOfInternetResults - } - return "" -} - -type AltNsfwScores struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Drawings float32 `protobuf:"fixed32,1,opt,name=drawings,proto3" json:"drawings,omitempty"` - Hentai float32 `protobuf:"fixed32,2,opt,name=hentai,proto3" json:"hentai,omitempty"` - Neutral float32 `protobuf:"fixed32,3,opt,name=neutral,proto3" json:"neutral,omitempty"` - Porn float32 `protobuf:"fixed32,4,opt,name=porn,proto3" json:"porn,omitempty"` - Sexy float32 `protobuf:"fixed32,5,opt,name=sexy,proto3" json:"sexy,omitempty"` -} - -func (x *AltNsfwScores) Reset() { - *x = AltNsfwScores{} - mi := &file_dd_server_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AltNsfwScores) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AltNsfwScores) ProtoMessage() {} - -func (x *AltNsfwScores) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AltNsfwScores.ProtoReflect.Descriptor instead. 
-func (*AltNsfwScores) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{3} -} - -func (x *AltNsfwScores) GetDrawings() float32 { - if x != nil { - return x.Drawings - } - return 0 -} - -func (x *AltNsfwScores) GetHentai() float32 { - if x != nil { - return x.Hentai - } - return 0 -} - -func (x *AltNsfwScores) GetNeutral() float32 { - if x != nil { - return x.Neutral - } - return 0 -} - -func (x *AltNsfwScores) GetPorn() float32 { - if x != nil { - return x.Porn - } - return 0 -} - -func (x *AltNsfwScores) GetSexy() float32 { - if x != nil { - return x.Sexy - } - return 0 -} - -type GetStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetStatusRequest) Reset() { - *x = GetStatusRequest{} - mi := &file_dd_server_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusRequest) ProtoMessage() {} - -func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusRequest.ProtoReflect.Descriptor instead. -func (*GetStatusRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{4} -} - -type TaskCount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MaxConcurrent int32 `protobuf:"varint,1,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - Executing int32 `protobuf:"varint,2,opt,name=executing,proto3" json:"executing,omitempty"` - WaitingInQueue int32 `protobuf:"varint,3,opt,name=waiting_in_queue,json=waitingInQueue,proto3" json:"waiting_in_queue,omitempty"` - Succeeded int32 `protobuf:"varint,4,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - Failed int32 `protobuf:"varint,5,opt,name=failed,proto3" json:"failed,omitempty"` - Cancelled int32 `protobuf:"varint,6,opt,name=cancelled,proto3" json:"cancelled,omitempty"` -} - -func (x *TaskCount) Reset() { - *x = TaskCount{} - mi := &file_dd_server_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskCount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskCount) ProtoMessage() {} - -func (x *TaskCount) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskCount.ProtoReflect.Descriptor instead. 
-func (*TaskCount) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{5} -} - -func (x *TaskCount) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *TaskCount) GetExecuting() int32 { - if x != nil { - return x.Executing - } - return 0 -} - -func (x *TaskCount) GetWaitingInQueue() int32 { - if x != nil { - return x.WaitingInQueue - } - return 0 -} - -func (x *TaskCount) GetSucceeded() int32 { - if x != nil { - return x.Succeeded - } - return 0 -} - -func (x *TaskCount) GetFailed() int32 { - if x != nil { - return x.Failed - } - return 0 -} - -func (x *TaskCount) GetCancelled() int32 { - if x != nil { - return x.Cancelled - } - return 0 -} - -type TaskMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AverageTaskWaitTimeSecs float32 `protobuf:"fixed32,1,opt,name=average_task_wait_time_secs,json=averageTaskWaitTimeSecs,proto3" json:"average_task_wait_time_secs,omitempty"` - MaxTaskWaitTimeSecs float32 `protobuf:"fixed32,2,opt,name=max_task_wait_time_secs,json=maxTaskWaitTimeSecs,proto3" json:"max_task_wait_time_secs,omitempty"` - AverageTaskExecutionTimeSecs float32 `protobuf:"fixed32,3,opt,name=average_task_execution_time_secs,json=averageTaskExecutionTimeSecs,proto3" json:"average_task_execution_time_secs,omitempty"` - AverageTaskVirtualMemoryUsageBytes int64 `protobuf:"varint,4,opt,name=average_task_virtual_memory_usage_bytes,json=averageTaskVirtualMemoryUsageBytes,proto3" json:"average_task_virtual_memory_usage_bytes,omitempty"` - AverageTaskRssMemoryUsageBytes int64 `protobuf:"varint,5,opt,name=average_task_rss_memory_usage_bytes,json=averageTaskRssMemoryUsageBytes,proto3" json:"average_task_rss_memory_usage_bytes,omitempty"` - PeakTaskRssMemoryUsageBytes int64 `protobuf:"varint,6,opt,name=peak_task_rss_memory_usage_bytes,json=peakTaskRssMemoryUsageBytes,proto3" json:"peak_task_rss_memory_usage_bytes,omitempty"` - PeakTaskVmsMemoryUsageBytes int64 `protobuf:"varint,7,opt,name=peak_task_vms_memory_usage_bytes,json=peakTaskVmsMemoryUsageBytes,proto3" json:"peak_task_vms_memory_usage_bytes,omitempty"` -} - -func (x *TaskMetrics) Reset() { - *x = TaskMetrics{} - mi := &file_dd_server_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskMetrics) ProtoMessage() {} - -func (x *TaskMetrics) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskMetrics.ProtoReflect.Descriptor instead. 
-func (*TaskMetrics) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{6} -} - -func (x *TaskMetrics) GetAverageTaskWaitTimeSecs() float32 { - if x != nil { - return x.AverageTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetMaxTaskWaitTimeSecs() float32 { - if x != nil { - return x.MaxTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskExecutionTimeSecs() float32 { - if x != nil { - return x.AverageTaskExecutionTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskVirtualMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskVirtualMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskVmsMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskVmsMemoryUsageBytes - } - return 0 -} - -type GetStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - TaskCount *TaskCount `protobuf:"bytes,2,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` - TaskMetrics *TaskMetrics `protobuf:"bytes,3,opt,name=task_metrics,json=taskMetrics,proto3" json:"task_metrics,omitempty"` -} - -func (x *GetStatusResponse) Reset() { - *x = GetStatusResponse{} - mi := &file_dd_server_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusResponse) ProtoMessage() {} - -func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusResponse.ProtoReflect.Descriptor instead. 
-func (*GetStatusResponse) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{7} -} - -func (x *GetStatusResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *GetStatusResponse) GetTaskCount() *TaskCount { - if x != nil { - return x.TaskCount - } - return nil -} - -func (x *GetStatusResponse) GetTaskMetrics() *TaskMetrics { - if x != nil { - return x.TaskMetrics - } - return nil -} - -var File_dd_server_proto protoreflect.FileDescriptor - -var file_dd_server_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x64, 0x64, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0d, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x8a, 0x06, 0x0a, 0x14, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x25, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x20, 0x75, 0x74, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, - 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, - 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, 0x4d, 
0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, - 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, - 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, - 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0xcd, 0x12, - 0x0a, 0x17, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, - 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x25, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, - 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x75, 
0x74, 0x63, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, - 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, - 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, - 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, - 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, - 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, - 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, - 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, - 0x12, 0x41, 0x0a, 0x1d, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x64, 0x75, 0x70, 0x65, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x6b, 0x65, 0x6c, 0x79, - 0x5f, 0x64, 0x75, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x4c, - 0x69, 0x6b, 0x65, 0x6c, 0x79, 0x44, 0x75, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, - 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x76, 0x65, 0x72, - 
0x61, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, - 0x6c, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x6a, - 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, - 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, - 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, - 0x65, 0x5f, 0x32, 0x35, 0x70, 0x63, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, - 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, - 0x41, 0x62, 0x6f, 0x76, 0x65, 0x32, 0x35, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, - 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, - 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, - 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x33, 0x33, - 0x70, 0x63, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, - 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, - 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, - 0x65, 0x33, 0x33, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, - 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x35, 0x30, 0x70, 0x63, 0x74, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, - 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, - 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, 0x65, 0x35, 0x30, 0x70, - 0x63, 0x74, 0x12, 0x57, 0x0a, 0x29, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4c, 0x0a, 0x11, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, - 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x70, 0x65, - 0x6e, 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, - 0x65, 0x12, 0x54, 0x0a, 
0x17, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x74, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x52, 0x15, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x73, 0x66, - 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x29, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x6f, 0x66, 0x5f, - 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15, 0x20, 0x03, 0x28, 0x01, 0x52, 0x24, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x66, 0x43, 0x61, - 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3e, 0x0a, 0x1c, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6f, - 0x66, 0x5f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x68, 0x61, - 0x73, 0x68, 0x4f, 0x66, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, - 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, - 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x30, - 0x0a, 0x14, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, - 0x12, 0x62, 0x0a, 0x2f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, 0x69, 0x6c, 0x5f, 0x77, 0x65, - 0x62, 0x70, 0x5f, 0x61, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x36, 0x34, 0x5f, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x29, 0x63, 0x61, 0x6e, 0x64, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, - 0x69, 0x6c, 0x57, 0x65, 0x62, 0x70, 0x41, 0x73, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x30, 0x64, 0x6f, 0x65, 0x73, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x69, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x68, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x6c, - 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x2a, - 0x64, 0x6f, 0x65, 0x73, 0x4e, 0x6f, 0x74, 0x49, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x54, 0x68, 0x65, - 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, 
0x67, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x73, - 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x73, - 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, - 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x69, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x2d, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x27, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x46, 0x69, - 0x72, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0d, 0x63, 0x70, - 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x1e, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x65, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0xf7, 0x03, - 0x0a, 0x10, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, - 0x73, 0x73, 0x12, 0x6c, 0x0a, 0x35, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x2d, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x73, 0x4a, - 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, - 0x12, 0x58, 0x0a, 0x2a, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 
0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x72, 0x0a, 0x38, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x63, 0x74, 0x5f, - 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x30, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x44, 0x69, 0x63, 0x74, 0x41, 0x73, 0x4a, 0x73, 0x6f, - 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4a, - 0x0a, 0x23, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, - 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x69, 0x6e, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1d, 0x6d, 0x69, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x12, 0x5b, 0x0a, 0x2b, 0x65, 0x61, - 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x26, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x44, 0x61, 0x74, 0x65, 0x4f, 0x66, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x0d, 0x41, 0x6c, 0x74, 0x4e, - 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x12, 0x18, 0x0a, - 0x07, 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, - 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x73, - 0x65, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x73, 0x65, 0x78, 0x79, 0x22, - 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x43, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x5f, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 
0x52, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x6c, 0x65, 0x64, 0x22, 0xf9, 0x03, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x3c, 0x0a, 0x1b, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x17, 0x61, 0x76, 0x65, 0x72, 0x61, - 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x63, 0x73, 0x12, 0x34, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x76, 0x65, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x1c, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, - 0x12, 0x53, 0x0a, 0x27, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x22, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x69, - 0x72, 0x74, 0x75, 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x23, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x1e, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, - 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, - 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x6d, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, - 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x52, 
0x1b, 0x70, 0x65, 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x6d, 0x73, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x37, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x09, - 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x73, - 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0xc8, 0x01, 0x0a, 0x13, 0x44, 0x75, 0x70, - 0x65, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x61, 0x0a, 0x12, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, - 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x23, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x64, 0x75, - 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x20, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x64, - 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_dd_server_proto_rawDescOnce sync.Once - file_dd_server_proto_rawDescData = file_dd_server_proto_rawDesc -) - -func file_dd_server_proto_rawDescGZIP() []byte { - file_dd_server_proto_rawDescOnce.Do(func() { - file_dd_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_dd_server_proto_rawDescData) - }) - return file_dd_server_proto_rawDescData -} - -var file_dd_server_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_dd_server_proto_goTypes = []any{ - (*RarenessScoreRequest)(nil), // 0: dupedetection.RarenessScoreRequest - (*ImageRarenessScoreReply)(nil), // 1: dupedetection.ImageRarenessScoreReply - (*InternetRareness)(nil), // 2: dupedetection.InternetRareness - 
(*AltNsfwScores)(nil), // 3: dupedetection.AltNsfwScores - (*GetStatusRequest)(nil), // 4: dupedetection.GetStatusRequest - (*TaskCount)(nil), // 5: dupedetection.TaskCount - (*TaskMetrics)(nil), // 6: dupedetection.TaskMetrics - (*GetStatusResponse)(nil), // 7: dupedetection.GetStatusResponse -} -var file_dd_server_proto_depIdxs = []int32{ - 2, // 0: dupedetection.ImageRarenessScoreReply.internet_rareness:type_name -> dupedetection.InternetRareness - 3, // 1: dupedetection.ImageRarenessScoreReply.alternative_nsfw_scores:type_name -> dupedetection.AltNsfwScores - 5, // 2: dupedetection.GetStatusResponse.task_count:type_name -> dupedetection.TaskCount - 6, // 3: dupedetection.GetStatusResponse.task_metrics:type_name -> dupedetection.TaskMetrics - 0, // 4: dupedetection.DupeDetectionServer.ImageRarenessScore:input_type -> dupedetection.RarenessScoreRequest - 4, // 5: dupedetection.DupeDetectionServer.GetStatus:input_type -> dupedetection.GetStatusRequest - 1, // 6: dupedetection.DupeDetectionServer.ImageRarenessScore:output_type -> dupedetection.ImageRarenessScoreReply - 7, // 7: dupedetection.DupeDetectionServer.GetStatus:output_type -> dupedetection.GetStatusResponse - 6, // [6:8] is the sub-list for method output_type - 4, // [4:6] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_dd_server_proto_init() } -func file_dd_server_proto_init() { - if File_dd_server_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_dd_server_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_dd_server_proto_goTypes, - DependencyIndexes: file_dd_server_proto_depIdxs, - MessageInfos: file_dd_server_proto_msgTypes, - }.Build() - File_dd_server_proto = out.File - file_dd_server_proto_rawDesc = nil - file_dd_server_proto_goTypes = nil - file_dd_server_proto_depIdxs = nil -} diff --git a/gen/dupedetection/dd-server_grpc.pb.go b/gen/dupedetection/dd-server_grpc.pb.go deleted file mode 100644 index 27ee79bf..00000000 --- a/gen/dupedetection/dd-server_grpc.pb.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - DupeDetectionServer_ImageRarenessScore_FullMethodName = "/dupedetection.DupeDetectionServer/ImageRarenessScore" - DupeDetectionServer_GetStatus_FullMethodName = "/dupedetection.DupeDetectionServer/GetStatus" -) - -// DupeDetectionServerClient is the client API for DupeDetectionServer service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type DupeDetectionServerClient interface { - ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) - GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) -} - -type dupeDetectionServerClient struct { - cc grpc.ClientConnInterface -} - -func NewDupeDetectionServerClient(cc grpc.ClientConnInterface) DupeDetectionServerClient { - return &dupeDetectionServerClient{cc} -} - -func (c *dupeDetectionServerClient) ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) { - out := new(ImageRarenessScoreReply) - err := c.cc.Invoke(ctx, DupeDetectionServer_ImageRarenessScore_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dupeDetectionServerClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) { - out := new(GetStatusResponse) - err := c.cc.Invoke(ctx, DupeDetectionServer_GetStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DupeDetectionServerServer is the server API for DupeDetectionServer service. -// All implementations must embed UnimplementedDupeDetectionServerServer -// for forward compatibility -type DupeDetectionServerServer interface { - ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) - GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) - mustEmbedUnimplementedDupeDetectionServerServer() -} - -// UnimplementedDupeDetectionServerServer must be embedded to have forward compatible implementations. -type UnimplementedDupeDetectionServerServer struct { -} - -func (UnimplementedDupeDetectionServerServer) ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImageRarenessScore not implemented") -} -func (UnimplementedDupeDetectionServerServer) GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") -} -func (UnimplementedDupeDetectionServerServer) mustEmbedUnimplementedDupeDetectionServerServer() {} - -// UnsafeDupeDetectionServerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DupeDetectionServerServer will -// result in compilation errors. 
-type UnsafeDupeDetectionServerServer interface { - mustEmbedUnimplementedDupeDetectionServerServer() -} - -func RegisterDupeDetectionServerServer(s grpc.ServiceRegistrar, srv DupeDetectionServerServer) { - s.RegisterService(&DupeDetectionServer_ServiceDesc, srv) -} - -func _DupeDetectionServer_ImageRarenessScore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RarenessScoreRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_ImageRarenessScore_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, req.(*RarenessScoreRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _DupeDetectionServer_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).GetStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_GetStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).GetStatus(ctx, req.(*GetStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// DupeDetectionServer_ServiceDesc is the grpc.ServiceDesc for DupeDetectionServer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var DupeDetectionServer_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "dupedetection.DupeDetectionServer", - HandlerType: (*DupeDetectionServerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ImageRarenessScore", - Handler: _DupeDetectionServer_ImageRarenessScore_Handler, - }, - { - MethodName: "GetStatus", - Handler: _DupeDetectionServer_GetStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "dd-server.proto", -} diff --git a/gen/raptorq/raptorq.pb.go b/gen/raptorq/raptorq.pb.go deleted file mode 100644 index 8c9ba9d0..00000000 --- a/gen/raptorq/raptorq.pb.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type EncodeMetaDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - FilesNumber uint32 `protobuf:"varint,2,opt,name=files_number,json=filesNumber,proto3" json:"files_number,omitempty"` - BlockHash string `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - PastelId string `protobuf:"bytes,4,opt,name=pastel_id,json=pastelId,proto3" json:"pastel_id,omitempty"` -} - -func (x *EncodeMetaDataRequest) Reset() { - *x = EncodeMetaDataRequest{} - mi := &file_raptorq_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataRequest) ProtoMessage() {} - -func (x *EncodeMetaDataRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataRequest.ProtoReflect.Descriptor instead. -func (*EncodeMetaDataRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{0} -} - -func (x *EncodeMetaDataRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *EncodeMetaDataRequest) GetFilesNumber() uint32 { - if x != nil { - return x.FilesNumber - } - return 0 -} - -func (x *EncodeMetaDataRequest) GetBlockHash() string { - if x != nil { - return x.BlockHash - } - return "" -} - -func (x *EncodeMetaDataRequest) GetPastelId() string { - if x != nil { - return x.PastelId - } - return "" -} - -type EncodeMetaDataReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeMetaDataReply) Reset() { - *x = EncodeMetaDataReply{} - mi := &file_raptorq_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataReply) ProtoMessage() {} - -func (x *EncodeMetaDataReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataReply.ProtoReflect.Descriptor instead. 
-func (*EncodeMetaDataReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{1} -} - -func (x *EncodeMetaDataReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeMetaDataReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeMetaDataReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeRequest) Reset() { - *x = EncodeRequest{} - mi := &file_raptorq_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeRequest) ProtoMessage() {} - -func (x *EncodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeRequest.ProtoReflect.Descriptor instead. -func (*EncodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{2} -} - -func (x *EncodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeReply) Reset() { - *x = EncodeReply{} - mi := &file_raptorq_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeReply) ProtoMessage() {} - -func (x *EncodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeReply.ProtoReflect.Descriptor instead. 
-func (*EncodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{3} -} - -func (x *EncodeReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeRequest) Reset() { - *x = DecodeRequest{} - mi := &file_raptorq_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeRequest) ProtoMessage() {} - -func (x *DecodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeRequest.ProtoReflect.Descriptor instead. -func (*DecodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{4} -} - -func (x *DecodeRequest) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *DecodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeReply) Reset() { - *x = DecodeReply{} - mi := &file_raptorq_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeReply) ProtoMessage() {} - -func (x *DecodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeReply.ProtoReflect.Descriptor instead. 
-func (*DecodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{5} -} - -func (x *DecodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_raptorq_proto protoreflect.FileDescriptor - -var file_raptorq_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x22, 0x8a, 0x01, 0x0a, 0x15, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x74, - 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x22, 0x7d, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, - 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, - 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x23, 0x0a, 0x0d, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x75, 0x0a, 0x0b, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6d, 0x62, 0x6f, - 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, - 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x22, 0x52, 0x0a, 0x0d, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 
0x68, 0x22, 0x21, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x32, 0xc9, 0x01, 0x0a, 0x07, 0x52, 0x61, 0x70, 0x74, - 0x6f, 0x72, 0x51, 0x12, 0x4e, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x44, - 0x65, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x72, - 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_raptorq_proto_rawDescOnce sync.Once - file_raptorq_proto_rawDescData = file_raptorq_proto_rawDesc -) - -func file_raptorq_proto_rawDescGZIP() []byte { - file_raptorq_proto_rawDescOnce.Do(func() { - file_raptorq_proto_rawDescData = protoimpl.X.CompressGZIP(file_raptorq_proto_rawDescData) - }) - return file_raptorq_proto_rawDescData -} - -var file_raptorq_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_raptorq_proto_goTypes = []any{ - (*EncodeMetaDataRequest)(nil), // 0: raptorq.EncodeMetaDataRequest - (*EncodeMetaDataReply)(nil), // 1: raptorq.EncodeMetaDataReply - (*EncodeRequest)(nil), // 2: raptorq.EncodeRequest - (*EncodeReply)(nil), // 3: raptorq.EncodeReply - (*DecodeRequest)(nil), // 4: raptorq.DecodeRequest - (*DecodeReply)(nil), // 5: raptorq.DecodeReply -} -var file_raptorq_proto_depIdxs = []int32{ - 0, // 0: raptorq.RaptorQ.EncodeMetaData:input_type -> raptorq.EncodeMetaDataRequest - 2, // 1: raptorq.RaptorQ.Encode:input_type -> raptorq.EncodeRequest - 4, // 2: raptorq.RaptorQ.Decode:input_type -> raptorq.DecodeRequest - 1, // 3: raptorq.RaptorQ.EncodeMetaData:output_type -> raptorq.EncodeMetaDataReply - 3, // 4: raptorq.RaptorQ.Encode:output_type -> raptorq.EncodeReply - 5, // 5: raptorq.RaptorQ.Decode:output_type -> raptorq.DecodeReply - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_raptorq_proto_init() } -func file_raptorq_proto_init() { - if File_raptorq_proto != nil { - return - } - type x struct{} - out := 
protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_raptorq_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_raptorq_proto_goTypes, - DependencyIndexes: file_raptorq_proto_depIdxs, - MessageInfos: file_raptorq_proto_msgTypes, - }.Build() - File_raptorq_proto = out.File - file_raptorq_proto_rawDesc = nil - file_raptorq_proto_goTypes = nil - file_raptorq_proto_depIdxs = nil -} diff --git a/gen/raptorq/raptorq_grpc.pb.go b/gen/raptorq/raptorq_grpc.pb.go deleted file mode 100644 index 01c17ae8..00000000 --- a/gen/raptorq/raptorq_grpc.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - RaptorQ_EncodeMetaData_FullMethodName = "/raptorq.RaptorQ/EncodeMetaData" - RaptorQ_Encode_FullMethodName = "/raptorq.RaptorQ/Encode" - RaptorQ_Decode_FullMethodName = "/raptorq.RaptorQ/Decode" -) - -// RaptorQClient is the client API for RaptorQ service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type RaptorQClient interface { - EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) - Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) - Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) -} - -type raptorQClient struct { - cc grpc.ClientConnInterface -} - -func NewRaptorQClient(cc grpc.ClientConnInterface) RaptorQClient { - return &raptorQClient{cc} -} - -func (c *raptorQClient) EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) { - out := new(EncodeMetaDataReply) - err := c.cc.Invoke(ctx, RaptorQ_EncodeMetaData_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) { - out := new(EncodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Encode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) { - out := new(DecodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Decode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// RaptorQServer is the server API for RaptorQ service. 
-// All implementations must embed UnimplementedRaptorQServer -// for forward compatibility -type RaptorQServer interface { - EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) - Encode(context.Context, *EncodeRequest) (*EncodeReply, error) - Decode(context.Context, *DecodeRequest) (*DecodeReply, error) - mustEmbedUnimplementedRaptorQServer() -} - -// UnimplementedRaptorQServer must be embedded to have forward compatible implementations. -type UnimplementedRaptorQServer struct { -} - -func (UnimplementedRaptorQServer) EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method EncodeMetaData not implemented") -} -func (UnimplementedRaptorQServer) Encode(context.Context, *EncodeRequest) (*EncodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Encode not implemented") -} -func (UnimplementedRaptorQServer) Decode(context.Context, *DecodeRequest) (*DecodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Decode not implemented") -} -func (UnimplementedRaptorQServer) mustEmbedUnimplementedRaptorQServer() {} - -// UnsafeRaptorQServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to RaptorQServer will -// result in compilation errors. -type UnsafeRaptorQServer interface { - mustEmbedUnimplementedRaptorQServer() -} - -func RegisterRaptorQServer(s grpc.ServiceRegistrar, srv RaptorQServer) { - s.RegisterService(&RaptorQ_ServiceDesc, srv) -} - -func _RaptorQ_EncodeMetaData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeMetaDataRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).EncodeMetaData(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_EncodeMetaData_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).EncodeMetaData(ctx, req.(*EncodeMetaDataRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Encode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Encode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_Encode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Encode(ctx, req.(*EncodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Decode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DecodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Decode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_Decode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Decode(ctx, req.(*DecodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// RaptorQ_ServiceDesc is the grpc.ServiceDesc for RaptorQ 
service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var RaptorQ_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "raptorq.RaptorQ", - HandlerType: (*RaptorQServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EncodeMetaData", - Handler: _RaptorQ_EncodeMetaData_Handler, - }, - { - MethodName: "Encode", - Handler: _RaptorQ_Encode_Handler, - }, - { - MethodName: "Decode", - Handler: _RaptorQ_Decode_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "raptorq.proto", -} diff --git a/gen/supernode/agents/.gitkeep b/gen/supernode/agents/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/pkg/dd/client.go b/pkg/dd/client.go deleted file mode 100644 index f7b10c80..00000000 --- a/pkg/dd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package dd - -import ( - "context" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding/gzip" -) - -const ( - defaultConnectTimeout = 60 * time.Second -) - -type client struct{} - -// Connect implements node.Client.Connect() -func (cl *client) Connect(ctx context.Context, address string) (Connection, error) { - // Limits the dial timeout, prevent got stuck too long - dialCtx, cancel := context.WithTimeout(ctx, defaultConnectTimeout) - defer cancel() - - id, _ := random.String(8, random.Base62Chars) - - grpcConn, err := grpc.DialContext(dialCtx, address, - //lint:ignore SA1019 we want to ignore this for now - grpc.WithInsecure(), - grpc.WithBlock(), - grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name), grpc.MaxCallRecvMsgSize(35000000)), - ) - if err != nil { - return nil, errors.Errorf("fail to dial: %w", err).WithField("address", address) - } - - logtrace.Debug(ctx, "Connected to address with max recv size 35 MB", logtrace.Fields{logtrace.FieldModule: "dd", "address": address}) - - conn := newClientConn(id, grpcConn) - go func() { - //<-conn.Done() // FIXME: to be implemented by new gRPC package - logtrace.Debug(ctx, "Disconnected", logtrace.Fields{logtrace.FieldModule: "dd", "target": grpcConn.Target()}) - }() - return conn, nil -} diff --git a/pkg/dd/config.go b/pkg/dd/config.go deleted file mode 100644 index c0ca0607..00000000 --- a/pkg/dd/config.go +++ /dev/null @@ -1,50 +0,0 @@ -package dd - -import ( - "fmt" - "path/filepath" -) - -const ( - errValidationStr = "ddserver client validation failed - missing val" -) - -// Config contains settings of the dd-server -type Config struct { - // Host the queries IPv4 or IPv6 address - Host string `mapstructure:"host" json:"host,omitempty"` - - // Port the queries port to listen for connections on - Port int `mapstructure:"port" json:"port,omitempty"` - - // DDFilesDir - the location of temporary folder to transfer image data to ddserver - DDFilesDir string `mapstructure:"dd-temp-file-dir" json:"dd-temp-file-dir,omitempty"` -} - -// NewConfig returns a new Config instance. 
-func NewConfig() *Config { - return &Config{} -} - -// SetWorkDir update working dir -func (config *Config) SetWorkDir(workDir string) { - if !filepath.IsAbs(config.DDFilesDir) { - config.DDFilesDir = filepath.Join(workDir, config.DDFilesDir) - } -} - -// Validate raptorq configs -func (config *Config) Validate() error { - if config.Host == "" { - return fmt.Errorf("%s: %s", errValidationStr, "host") - } - if config.Port == 0 { - return fmt.Errorf("%s: %s", errValidationStr, "port") - } - - if config.DDFilesDir == "" { - return fmt.Errorf("%s: %s", errValidationStr, "dd-temp-file-dir") - } - - return nil -} diff --git a/pkg/dd/connection.go b/pkg/dd/connection.go deleted file mode 100644 index 34f3b20e..00000000 --- a/pkg/dd/connection.go +++ /dev/null @@ -1,23 +0,0 @@ -package dd - -import ( - "google.golang.org/grpc" -) - -// clientConn represents grpc client conneciton. -type clientConn struct { - *grpc.ClientConn - - id string -} - -func (conn *clientConn) DDService(config *Config) DDService { - return newDDServerClient(conn, config) -} - -func newClientConn(id string, conn *grpc.ClientConn) *clientConn { - return &clientConn{ - ClientConn: conn, - id: id, - } -} diff --git a/pkg/dd/dd_mock.go b/pkg/dd/dd_mock.go deleted file mode 100644 index 224831c6..00000000 --- a/pkg/dd/dd_mock.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go -// -// Generated by this command: -// -// mockgen -destination=dd_mock.go -package=dd -source=interfaces.go -// - -// Package dd is a generated GoMock package. -package dd - -import ( - context "context" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" -) - -// MockClientInterface is a mock of ClientInterface interface. -type MockClientInterface struct { - ctrl *gomock.Controller - recorder *MockClientInterfaceMockRecorder - isgomock struct{} -} - -// MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface. -type MockClientInterfaceMockRecorder struct { - mock *MockClientInterface -} - -// NewMockClientInterface creates a new mock instance. -func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface { - mock := &MockClientInterface{ctrl: ctrl} - mock.recorder = &MockClientInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder { - return m.recorder -} - -// Connect mocks base method. -func (m *MockClientInterface) Connect(ctx context.Context, address string) (Connection, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", ctx, address) - ret0, _ := ret[0].(Connection) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Connect indicates an expected call of Connect. -func (mr *MockClientInterfaceMockRecorder) Connect(ctx, address any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockClientInterface)(nil).Connect), ctx, address) -} - -// MockConnection is a mock of Connection interface. -type MockConnection struct { - ctrl *gomock.Controller - recorder *MockConnectionMockRecorder - isgomock struct{} -} - -// MockConnectionMockRecorder is the mock recorder for MockConnection. -type MockConnectionMockRecorder struct { - mock *MockConnection -} - -// NewMockConnection creates a new mock instance. 
-func NewMockConnection(ctrl *gomock.Controller) *MockConnection { - mock := &MockConnection{ctrl: ctrl} - mock.recorder = &MockConnectionMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockConnection) EXPECT() *MockConnectionMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockConnection) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockConnectionMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConnection)(nil).Close)) -} - -// DDService mocks base method. -func (m *MockConnection) DDService(config *Config) DDService { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DDService", config) - ret0, _ := ret[0].(DDService) - return ret0 -} - -// DDService indicates an expected call of DDService. -func (mr *MockConnectionMockRecorder) DDService(config any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DDService", reflect.TypeOf((*MockConnection)(nil).DDService), config) -} - -// MockDDService is a mock of DDService interface. -type MockDDService struct { - ctrl *gomock.Controller - recorder *MockDDServiceMockRecorder - isgomock struct{} -} - -// MockDDServiceMockRecorder is the mock recorder for MockDDService. -type MockDDServiceMockRecorder struct { - mock *MockDDService -} - -// NewMockDDService creates a new mock instance. -func NewMockDDService(ctrl *gomock.Controller) *MockDDService { - mock := &MockDDService{ctrl: ctrl} - mock.recorder = &MockDDServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDDService) EXPECT() *MockDDServiceMockRecorder { - return m.recorder -} - -// GetStatus mocks base method. -func (m *MockDDService) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatus", ctx, req) - ret0, _ := ret[0].(GetStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatus indicates an expected call of GetStatus. -func (mr *MockDDServiceMockRecorder) GetStatus(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockDDService)(nil).GetStatus), ctx, req) -} - -// ImageRarenessScore mocks base method. -func (m *MockDDService) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageRarenessScore", ctx, req) - ret0, _ := ret[0].(ImageRarenessScoreResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageRarenessScore indicates an expected call of ImageRarenessScore. 
-func (mr *MockDDServiceMockRecorder) ImageRarenessScore(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRarenessScore", reflect.TypeOf((*MockDDService)(nil).ImageRarenessScore), ctx, req) -} diff --git a/pkg/dd/dd_server_client.go b/pkg/dd/dd_server_client.go deleted file mode 100644 index 5f927805..00000000 --- a/pkg/dd/dd_server_client.go +++ /dev/null @@ -1,24 +0,0 @@ -package dd - -import ( - dd "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" -) - -type ddServerClientImpl struct { - config *Config - conn *clientConn - ddService dd.DupeDetectionServerClient -} - -// NewDDServerClient returns a new dd-server-client instance. -func newDDServerClient(conn *clientConn, c *Config) DDService { - return &ddServerClientImpl{ - config: c, - conn: conn, - ddService: dd.NewDupeDetectionServerClient(conn), - } -} - -func (c *ddServerClientImpl) Close() { - c.conn.Close() -} diff --git a/pkg/dd/image_rareness.go b/pkg/dd/image_rareness.go deleted file mode 100644 index 74fec800..00000000 --- a/pkg/dd/image_rareness.go +++ /dev/null @@ -1,108 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type RarenessScoreRequest struct { - Filepath string -} - -type ImageRarenessScoreResponse struct { - PastelBlockHashWhenRequestSubmitted string - PastelBlockHeightWhenRequestSubmitted string - UtcTimestampWhenRequestSubmitted string - PastelIdOfSubmitter string - PastelIdOfRegisteringSupernode_1 string - PastelIdOfRegisteringSupernode_2 string - PastelIdOfRegisteringSupernode_3 string - IsPastelOpenapiRequest bool - ImageFilePath string - DupeDetectionSystemVersion string - IsLikelyDupe bool - IsRareOnInternet bool - OverallRarenessScore float32 - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 - RarenessScoresTableJsonCompressedB64 string - InternetRareness *ddService.InternetRareness - OpenNsfwScore float32 - AlternativeNsfwScores *ddService.AltNsfwScores - ImageFingerprintOfCandidateImageFile []float64 - CollectionNameString string - HashOfCandidateImageFile string - OpenApiGroupIdString string - GroupRarenessScore float32 - CandidateImageThumbnailWebpAsBase64String string - DoesNotImpactTheFollowingCollectionStrings string - IsInvalidSenseRequest bool - InvalidSenseRequestReason string - SimilarityScoreToFirstEntryInCollection float32 - CpProbability float32 - ChildProbability float32 - ImageFingerprintSetChecksum string -} - -// ImageRarenessScore gets the image rareness score -func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - ctx = net.AddCorrelationID(ctx) - fields := logtrace.Fields{ - logtrace.FieldMethod: "ImageRarenessScore", - logtrace.FieldRequest: req, - } - logtrace.Debug(ctx, "getting image rareness score", fields) - - res, err := c.ddService.ImageRarenessScore(ctx, &ddService.RarenessScoreRequest{ImageFilepath: req.Filepath}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get image rareness score", fields) - return ImageRarenessScoreResponse{}, fmt.Errorf("dd image rareness score error: %w", err) - } - - logtrace.Debug(ctx, "successfully got image rareness score", fields) - return 
toImageRarenessScoreResponse(res), nil -} - -func toImageRarenessScoreResponse(reply *ddService.ImageRarenessScoreReply) ImageRarenessScoreResponse { - return ImageRarenessScoreResponse{ - PastelBlockHashWhenRequestSubmitted: reply.PastelBlockHashWhenRequestSubmitted, - PastelBlockHeightWhenRequestSubmitted: reply.PastelBlockHeightWhenRequestSubmitted, - UtcTimestampWhenRequestSubmitted: reply.UtcTimestampWhenRequestSubmitted, - PastelIdOfSubmitter: reply.PastelIdOfSubmitter, - PastelIdOfRegisteringSupernode_1: reply.PastelIdOfRegisteringSupernode_1, - PastelIdOfRegisteringSupernode_2: reply.PastelIdOfRegisteringSupernode_2, - PastelIdOfRegisteringSupernode_3: reply.PastelIdOfRegisteringSupernode_3, - IsPastelOpenapiRequest: reply.IsPastelOpenapiRequest, - ImageFilePath: reply.ImageFilePath, - DupeDetectionSystemVersion: reply.DupeDetectionSystemVersion, - IsLikelyDupe: reply.IsLikelyDupe, - IsRareOnInternet: reply.IsRareOnInternet, - OverallRarenessScore: reply.OverallRarenessScore, - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct, - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct, - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct, - RarenessScoresTableJsonCompressedB64: reply.RarenessScoresTableJsonCompressedB64, - InternetRareness: reply.InternetRareness, - OpenNsfwScore: reply.OpenNsfwScore, - AlternativeNsfwScores: reply.AlternativeNsfwScores, - ImageFingerprintOfCandidateImageFile: reply.ImageFingerprintOfCandidateImageFile, - CollectionNameString: reply.CollectionNameString, - HashOfCandidateImageFile: reply.HashOfCandidateImageFile, - OpenApiGroupIdString: reply.OpenApiGroupIdString, - GroupRarenessScore: reply.GroupRarenessScore, - CandidateImageThumbnailWebpAsBase64String: reply.CandidateImageThumbnailWebpAsBase64String, - DoesNotImpactTheFollowingCollectionStrings: reply.DoesNotImpactTheFollowingCollectionStrings, - IsInvalidSenseRequest: reply.IsInvalidSenseRequest, - InvalidSenseRequestReason: reply.InvalidSenseRequestReason, - SimilarityScoreToFirstEntryInCollection: reply.SimilarityScoreToFirstEntryInCollection, - CpProbability: reply.CpProbability, - ChildProbability: reply.ChildProbability, - ImageFingerprintSetChecksum: reply.ImageFingerprintSetChecksum, - } -} diff --git a/pkg/dd/interfaces.go b/pkg/dd/interfaces.go deleted file mode 100644 index 45b196d3..00000000 --- a/pkg/dd/interfaces.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:generate mockgen -destination=dd_mock.go -package=dd -source=interfaces.go - -package dd - -import "context" - -// ClientInterface represents a base connection interface. -type ClientInterface interface { - // Connect connects to the server at the given address. - Connect(ctx context.Context, address string) (Connection, error) -} - -// Connection represents a client connection -type Connection interface { - // Close closes connection. - Close() error - - // DDService returns a new dd-service stream. - DDService(config *Config) DDService - - // FIXME: - // Done returns a channel that's closed when connection is shutdown. - //Done() <-chan struct{} -} - -// DDService contains methods for request services from dd-service. 
-type DDService interface { - ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) - GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) -} diff --git a/pkg/dd/status.go b/pkg/dd/status.go deleted file mode 100644 index 812b62d6..00000000 --- a/pkg/dd/status.go +++ /dev/null @@ -1,44 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type GetStatusRequest struct { -} - -type GetStatusResponse struct { - Version string - TaskCount *ddService.TaskCount - TaskMetrics *ddService.TaskMetrics -} - -// GetStatus retrieves the status. -func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - ctx = net.AddCorrelationID(ctx) - - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldRequest: req, - } - logtrace.Debug(ctx, "getting status", fields) - - res, err := c.ddService.GetStatus(ctx, &ddService.GetStatusRequest{}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get status", fields) - return GetStatusResponse{}, fmt.Errorf("dd get status error: %w", err) - } - - logtrace.Debug(ctx, "successfully got status", fields) - return GetStatusResponse{ - Version: res.GetVersion(), - TaskCount: res.GetTaskCount(), - TaskMetrics: res.GetTaskMetrics(), - }, nil -} diff --git a/proto/dupedetection/dd-server.proto b/proto/dupedetection/dd-server.proto deleted file mode 100644 index 0217aece..00000000 --- a/proto/dupedetection/dd-server.proto +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/dupedetection"; - -package dupedetection; - -service DupeDetectionServer { - rpc ImageRarenessScore(RarenessScoreRequest) returns(ImageRarenessScoreReply); - rpc GetStatus(GetStatusRequest) returns(GetStatusResponse); -} - -message RarenessScoreRequest { - string image_filepath = 1; - string pastel_block_hash_when_request_submitted = 2; - string pastel_block_height_when_request_submitted = 3; - string utc_timestamp_when_request_submitted = 4; - string pastel_id_of_submitter = 5; - string pastel_id_of_registering_supernode_1 = 6; - string pastel_id_of_registering_supernode_2 = 7; - string pastel_id_of_registering_supernode_3 = 8; - bool is_pastel_openapi_request = 9; - string open_api_group_id_string = 10; - string collection_name_string = 11; -} - -message ImageRarenessScoreReply { - string pastel_block_hash_when_request_submitted = 1; - string pastel_block_height_when_request_submitted = 2; - string utc_timestamp_when_request_submitted = 3; - string pastel_id_of_submitter = 4; - string pastel_id_of_registering_supernode_1 = 5; - string pastel_id_of_registering_supernode_2 = 6; - string pastel_id_of_registering_supernode_3 = 7; - bool is_pastel_openapi_request = 8; - string image_file_path = 9; - string dupe_detection_system_version = 10; - bool is_likely_dupe = 11; - bool is_rare_on_internet = 12; - float overall_rareness_score = 13; - float pct_of_top_10_most_similar_with_dupe_prob_above_25pct = 14; - float pct_of_top_10_most_similar_with_dupe_prob_above_33pct = 15; - float pct_of_top_10_most_similar_with_dupe_prob_above_50pct = 16; - string rareness_scores_table_json_compressed_b64 = 17; - InternetRareness internet_rareness = 18; - float open_nsfw_score = 19; - AltNsfwScores alternative_nsfw_scores = 20; - repeated double image_fingerprint_of_candidate_image_file = 21; - string collection_name_string = 22; - string hash_of_candidate_image_file = 23; - string open_api_group_id_string = 24; - float group_rareness_score = 25; - string candidate_image_thumbnail_webp_as_base64_string = 26; - string does_not_impact_the_following_collection_strings = 27; - bool is_invalid_sense_request = 28; - string invalid_sense_request_reason = 29; - float similarity_score_to_first_entry_in_collection = 30; - float cp_probability = 31; - float child_probability = 32; - string image_fingerprint_set_checksum = 33; -} - - -message InternetRareness { - string rare_on_internet_summary_table_as_json_compressed_b64 = 1; - string rare_on_internet_graph_json_compressed_b64 = 2; - string alternative_rare_on_internet_dict_as_json_compressed_b64 = 3; - uint32 min_number_of_exact_matches_in_page = 4; - string earliest_available_date_of_internet_results = 5; -} - -message AltNsfwScores { - float drawings = 1; - float hentai = 2; - float neutral = 3; - float porn = 4; - float sexy = 5; -} - -message GetStatusRequest {} - -message TaskCount { - int32 max_concurrent = 1; - int32 executing = 2; - int32 waiting_in_queue = 3; - int32 succeeded = 4; - int32 failed = 5; - int32 cancelled = 6; -} - -message TaskMetrics { - float average_task_wait_time_secs = 1; - float max_task_wait_time_secs = 2; - float average_task_execution_time_secs = 3; - int64 average_task_virtual_memory_usage_bytes = 4; - int64 average_task_rss_memory_usage_bytes = 5; - int64 peak_task_rss_memory_usage_bytes = 6; - int64 peak_task_vms_memory_usage_bytes = 7; -} - -message GetStatusResponse { - string version = 1; - TaskCount task_count = 2; - TaskMetrics task_metrics 
= 3; -} diff --git a/proto/raptorq/raptorq.proto b/proto/raptorq/raptorq.proto deleted file mode 100644 index 07db9baa..00000000 --- a/proto/raptorq/raptorq.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. -syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/raptorq"; - -package raptorq; - -service RaptorQ { - rpc EncodeMetaData(EncodeMetaDataRequest) returns(EncodeMetaDataReply); - rpc Encode(EncodeRequest) returns(EncodeReply); - rpc Decode(DecodeRequest) returns(DecodeReply); -} - -message EncodeMetaDataRequest { - string path = 1; - uint32 files_number = 2; - string block_hash = 3; - string pastel_id = 4; -} - -message EncodeMetaDataReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message EncodeRequest { - string path = 1; -} - -message EncodeReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message DecodeRequest { - bytes encoder_parameters = 1; - string path = 2; -} - -message DecodeReply { - string path = 1; -} \ No newline at end of file From 90b89d6ca0531bdd29e4a4ed1048d78075190b64 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 15:18:10 +0500 Subject: [PATCH 04/36] Fixes --- .gitignore | 2 +- supernode/cascade/adaptors/lumera.go | 48 +++ supernode/cascade/adaptors/p2p.go | 116 ++++++ supernode/cascade/adaptors/rq.go | 46 +++ supernode/{services => }/cascade/config.go | 7 +- supernode/cascade/download.go | 250 ++++++++++++ supernode/cascade/download_helpers.go | 47 +++ supernode/{services => }/cascade/events.go | 0 supernode/cascade/helper.go | 163 ++++++++ .../{services => }/cascade/interfaces.go | 10 +- supernode/cascade/register.go | 122 ++++++ supernode/cascade/service.go | 44 ++ supernode/cascade/task.go | 38 ++ supernode/cmd/init.go | 39 +- supernode/cmd/service.go | 45 ++- supernode/cmd/start.go | 162 +++++--- supernode/cmd/supernode.go | 140 ------- supernode/config/defaults.go | 16 + supernode/config/save.go | 32 +- .../server/cascade/cascade_action_server.go | 367 ----------------- .../cascade/cascade_action_server_mock.go | 41 -- .../cascade/cascade_action_server_test.go | 97 ----- .../node/action/server/cascade/helper.go | 39 -- supernode/node/supernode/gateway/swagger.json | 279 ------------- supernode/node/supernode/server/config.go | 19 - .../node/supernode/server/config_test.go | 16 - .../node/supernode/server/mock_keyring.go | 379 ------------------ .../node/supernode/server/server_test.go | 68 ---- .../node/supernode/server/status_server.go | 204 ---------- .../supernode/server/status_server_test.go | 79 ---- supernode/services/cascade/adaptors/lumera.go | 81 ---- .../cascade/adaptors/mocks/lumera_mock.go | 127 ------ .../cascade/adaptors/mocks/p2p_mock.go | 51 --- .../cascade/adaptors/mocks/rq_mock.go | 85 ---- supernode/services/cascade/adaptors/p2p.go | 282 ------------- supernode/services/cascade/adaptors/rq.go | 81 ---- supernode/services/cascade/download.go | 375 ----------------- supernode/services/cascade/events_test.go | 35 -- supernode/services/cascade/helper.go | 292 -------------- .../cascade/mocks/cascade_interfaces_mock.go | 115 ------ supernode/services/cascade/register.go | 191 --------- supernode/services/cascade/register_test.go | 315 --------------- supernode/services/cascade/service.go | 47 --- 
supernode/services/cascade/service_test.go | 71 ---- supernode/services/cascade/status.go | 19 - supernode/services/cascade/status_test.go | 85 ---- supernode/services/cascade/task.go | 56 --- .../services/common/base/supernode_service.go | 70 ---- .../services/common/base/supernode_task.go | 71 ---- .../common/base/supernode_task_test.go | 82 ---- supernode/services/common/config.go | 19 - .../services/common/supernode/service.go | 262 ------------ .../services/common/supernode/service_test.go | 51 --- supernode/services/common/supernode/types.go | 111 ----- supernode/services/common/task_status.go | 51 --- supernode/services/common/task_status_test.go | 59 --- supernode/services/verifier/interface.go | 55 --- supernode/services/verifier/verifier.go | 222 ---------- supernode/services/verifier/verifier_test.go | 261 ------------ .../common/supernode => status}/metrics.go | 40 +- supernode/status/service.go | 192 +++++++++ .../supernode => transport}/gateway/server.go | 0 .../gateway/swagger.go | 54 +-- supernode/transport/gateway/swagger.json | 46 +++ supernode/transport/grpc/cascade/handler.go | 311 ++++++++++++++ supernode/transport/grpc/status/handler.go | 25 ++ .../grpc/status}/server.go | 99 ++--- supernode/verifier/interface.go | 31 ++ supernode/verifier/verifier.go | 76 ++++ tests/system/e2e_cascade_test.go | 2 +- 70 files changed, 1828 insertions(+), 5585 deletions(-) create mode 100644 supernode/cascade/adaptors/lumera.go create mode 100644 supernode/cascade/adaptors/p2p.go create mode 100644 supernode/cascade/adaptors/rq.go rename supernode/{services => }/cascade/config.go (63%) create mode 100644 supernode/cascade/download.go create mode 100644 supernode/cascade/download_helpers.go rename supernode/{services => }/cascade/events.go (100%) create mode 100644 supernode/cascade/helper.go rename supernode/{services => }/cascade/interfaces.go (53%) create mode 100644 supernode/cascade/register.go create mode 100644 supernode/cascade/service.go create mode 100644 supernode/cascade/task.go delete mode 100644 supernode/cmd/supernode.go create mode 100644 supernode/config/defaults.go delete mode 100644 supernode/node/action/server/cascade/cascade_action_server.go delete mode 100644 supernode/node/action/server/cascade/cascade_action_server_mock.go delete mode 100644 supernode/node/action/server/cascade/cascade_action_server_test.go delete mode 100644 supernode/node/action/server/cascade/helper.go delete mode 100644 supernode/node/supernode/gateway/swagger.json delete mode 100644 supernode/node/supernode/server/config.go delete mode 100644 supernode/node/supernode/server/config_test.go delete mode 100644 supernode/node/supernode/server/mock_keyring.go delete mode 100644 supernode/node/supernode/server/server_test.go delete mode 100644 supernode/node/supernode/server/status_server.go delete mode 100644 supernode/node/supernode/server/status_server_test.go delete mode 100644 supernode/services/cascade/adaptors/lumera.go delete mode 100644 supernode/services/cascade/adaptors/mocks/lumera_mock.go delete mode 100644 supernode/services/cascade/adaptors/mocks/p2p_mock.go delete mode 100644 supernode/services/cascade/adaptors/mocks/rq_mock.go delete mode 100644 supernode/services/cascade/adaptors/p2p.go delete mode 100644 supernode/services/cascade/adaptors/rq.go delete mode 100644 supernode/services/cascade/download.go delete mode 100644 supernode/services/cascade/events_test.go delete mode 100644 supernode/services/cascade/helper.go delete mode 100644 
supernode/services/cascade/mocks/cascade_interfaces_mock.go delete mode 100644 supernode/services/cascade/register.go delete mode 100644 supernode/services/cascade/register_test.go delete mode 100644 supernode/services/cascade/service.go delete mode 100644 supernode/services/cascade/service_test.go delete mode 100644 supernode/services/cascade/status.go delete mode 100644 supernode/services/cascade/status_test.go delete mode 100644 supernode/services/cascade/task.go delete mode 100644 supernode/services/common/base/supernode_service.go delete mode 100644 supernode/services/common/base/supernode_task.go delete mode 100644 supernode/services/common/base/supernode_task_test.go delete mode 100644 supernode/services/common/config.go delete mode 100644 supernode/services/common/supernode/service.go delete mode 100644 supernode/services/common/supernode/service_test.go delete mode 100644 supernode/services/common/supernode/types.go delete mode 100644 supernode/services/common/task_status.go delete mode 100644 supernode/services/common/task_status_test.go delete mode 100644 supernode/services/verifier/interface.go delete mode 100644 supernode/services/verifier/verifier.go delete mode 100644 supernode/services/verifier/verifier_test.go rename supernode/{services/common/supernode => status}/metrics.go (74%) create mode 100644 supernode/status/service.go rename supernode/{node/supernode => transport}/gateway/server.go (100%) rename supernode/{node/supernode => transport}/gateway/swagger.go (59%) create mode 100644 supernode/transport/gateway/swagger.json create mode 100644 supernode/transport/grpc/cascade/handler.go create mode 100644 supernode/transport/grpc/status/handler.go rename supernode/{node/supernode/server => transport/grpc/status}/server.go (63%) create mode 100644 supernode/verifier/interface.go create mode 100644 supernode/verifier/verifier.go diff --git a/.gitignore b/.gitignore index 685adc58..dcce8ceb 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ *.so *.dylib *.idea/ - +*.zip # Test binary, built with `go test -c` *.test diff --git a/supernode/cascade/adaptors/lumera.go b/supernode/cascade/adaptors/lumera.go new file mode 100644 index 00000000..2bd4ad27 --- /dev/null +++ b/supernode/cascade/adaptors/lumera.go @@ -0,0 +1,48 @@ +package adaptors + +import ( + "context" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" +) + +type LumeraClient interface { + GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) + GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) + Verify(ctx context.Context, address string, msg []byte, sig []byte) error + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) + FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) +} + +type lumeraImpl struct { c lumera.Client } + +func NewLumeraClient(c lumera.Client) LumeraClient { return &lumeraImpl{c: c} } + +func (l *lumeraImpl) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { + return l.c.Action().GetAction(ctx, actionID) +} + +func (l *lumeraImpl) GetTopSupernodes(ctx 
context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { + return l.c.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) +} + +func (l *lumeraImpl) Verify(ctx context.Context, address string, msg []byte, sig []byte) error { + return l.c.Auth().Verify(ctx, address, msg, sig) +} + +func (l *lumeraImpl) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { + return l.c.Action().GetActionFee(ctx, dataSizeKB) +} + +func (l *lumeraImpl) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { + return l.c.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) +} + +func (l *lumeraImpl) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { + return l.c.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) +} + diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/cascade/adaptors/p2p.go new file mode 100644 index 00000000..8f5e81bb --- /dev/null +++ b/supernode/cascade/adaptors/p2p.go @@ -0,0 +1,116 @@ +package adaptors + +import ( + "context" + "fmt" + "io/fs" + "math" + "math/rand/v2" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +const ( + loadSymbolsBatchSize = 3000 + storeSymbolsPercent = 18 + storeBatchContextTimeout = 3 * time.Minute + P2PDataRaptorQSymbol = 1 +) + +type P2PService interface { StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error } + +type p2pImpl struct { p2p p2p.Client; rqStore rqstore.Store } + +func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { return &p2pImpl{p2p: client, rqStore: store} } + +type StoreArtefactsRequest struct { TaskID string; ActionID string; IDFiles [][]byte; SymbolsDir string } + +func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { + logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) + start := time.Now() + firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) + if err != nil { return fmt.Errorf("error storing artefacts: %w", err) } + _ = firstPassSymbols; _ = totalSymbols; _ = start + remaining := 0 + if req.SymbolsDir != "" { if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { remaining = len(keys) } } + logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": time.Since(start).Milliseconds()}) + if remaining == 0 { logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) } + return nil +} + +func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { + if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { return 0, 0, fmt.Errorf("store symbol dir: %w", err) } + keys, err := walkSymbolTree(symbolsDir); if err != nil { return 0, 0, err } + 
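For orientation, the adaptors above are deliberately thin: they narrow the full lumera.Client and p2p.Client surfaces down to what the cascade flow needs. A hedged usage sketch, assuming already-initialized clients and an rqstore.Store (their construction happens in cmd/start.go, not here); all literal values are placeholders:

package cascadeexample

import (
	"context"
	"fmt"

	"github.com/LumeraProtocol/supernode/v2/p2p"
	"github.com/LumeraProtocol/supernode/v2/pkg/lumera"
	"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore"
	"github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors"
)

// wireAdaptors shows the intended wiring: concrete clients go in, the narrow
// cascade-facing interfaces come out. actionID is a placeholder value.
func wireAdaptors(ctx context.Context, lc lumera.Client, pc p2p.Client, rs rqstore.Store, actionID string) error {
	lumeraAdaptor := adaptors.NewLumeraClient(lc)
	p2pAdaptor := adaptors.NewP2PService(pc, rs)
	_ = p2pAdaptor // consumed later by the registration task

	act, err := lumeraAdaptor.GetAction(ctx, actionID)
	if err != nil {
		return fmt.Errorf("get action: %w", err)
	}
	fmt.Println("action state:", act.GetAction().State)
	return nil
}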
totalAvailable := len(keys) + targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) + if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } + logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: target coverage", logtrace.Fields{"total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount}) + if len(keys) > loadSymbolsBatchSize { + want := targetCount + if want < len(keys) { rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }); keys = keys[:want] } + sort.Strings(keys) + } + logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) + totalSymbols := 0 + firstBatchProcessed := false + for start := 0; start < len(keys); { + end := min(start+loadSymbolsBatchSize, len(keys)) + batch := keys[start:end] + if !firstBatchProcessed && len(metadataFiles) > 0 { + roomForSymbols := loadSymbolsBatchSize - len(metadataFiles) + if roomForSymbols < 0 { roomForSymbols = 0 } + if roomForSymbols < len(batch) { batch = batch[:roomForSymbols]; end = start + roomForSymbols } + symBytes, err := utils.LoadSymbols(symbolsDir, batch); if err != nil { return 0, 0, fmt.Errorf("load symbols: %w", err) } + payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)); payload = append(payload, metadataFiles...); payload = append(payload, symBytes...) + logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) + bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout); err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID); cancel(); if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } + logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) + totalSymbols += len(symBytes) + if len(batch) > 0 { if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } + firstBatchProcessed = true + } else { + count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch); if err != nil { return totalSymbols, totalAvailable, err } + totalSymbols += count + } + start = end + } + if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } + return totalSymbols, totalAvailable, nil +} + +func walkSymbolTree(root string) ([]string, error) { + var keys []string + err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { + if err != nil { return err } + if d.IsDir() { return nil } + if strings.EqualFold(filepath.Ext(d.Name()), ".json") { return nil } + rel, err := filepath.Rel(root, path); if err != nil { return err } + keys = append(keys, rel) + return nil + }) + if err != nil { return nil, fmt.Errorf("walk symbol tree: %w", err) } + return keys, nil +} + +func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) + symbols, err := utils.LoadSymbols(root, fileKeys); if err 
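To make the first-pass selection math above concrete, a small worked sketch using the same constants (ignoring the metadata files that share the first batch; note the real code only trims to the target when the symbol set is larger than one batch, smaller sets are sent in full):

package cascadeexample

import (
	"fmt"
	"math"
)

const (
	exLoadSymbolsBatchSize = 3000 // mirrors loadSymbolsBatchSize above
	exStoreSymbolsPercent  = 18   // mirrors storeSymbolsPercent above
)

// firstPassTarget mirrors the target calculation: roughly 18% of the available
// symbols are selected for the first pass, at least one when any exist, and
// the send is split into batches of at most 3000 keys.
func firstPassTarget(totalAvailable int) (target, batches int) {
	target = int(math.Ceil(float64(totalAvailable) * exStoreSymbolsPercent / 100.0))
	if target < 1 && totalAvailable > 0 {
		target = 1
	}
	batches = (target + exLoadSymbolsBatchSize - 1) / exLoadSymbolsBatchSize
	return target, batches
}

func exampleFirstPassTarget() {
	fmt.Println(firstPassTarget(10000)) // 1800 1
	fmt.Println(firstPassTarget(50000)) // 9000 3
}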
!= nil { return 0, fmt.Errorf("load symbols: %w", err) } + symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout); defer cancel() + logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) + if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } + logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) + if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } + return len(symbols), nil +} + +func min(a, b int) int { if a < b { return a }; return b } + diff --git a/supernode/cascade/adaptors/rq.go b/supernode/cascade/adaptors/rq.go new file mode 100644 index 00000000..a5ef9389 --- /dev/null +++ b/supernode/cascade/adaptors/rq.go @@ -0,0 +1,46 @@ +package adaptors + +import ( + "context" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" +) + +// CodecService wraps codec operations used by cascade +type CodecService interface { + EncodeInput(ctx context.Context, actionID string, path string, dataSize int) (EncodeResult, error) + Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) +} + +type EncodeResult struct { + SymbolsDir string + Metadata codec.Layout +} + +type DecodeRequest struct { + ActionID string + Symbols map[string][]byte + Layout codec.Layout +} + +type DecodeResult struct { + FilePath string + DecodeTmpDir string +} + +type codecImpl struct{ codec codec.Codec } + +func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } + +func (c *codecImpl) EncodeInput(ctx context.Context, actionID, path string, dataSize int) (EncodeResult, error) { + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path, DataSize: dataSize}) + if err != nil { return EncodeResult{}, err } + return EncodeResult{SymbolsDir: res.SymbolsDir, Metadata: res.Metadata}, nil +} + +func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) { + res, err := c.codec.Decode(ctx, codec.DecodeRequest{ActionID: req.ActionID, Symbols: req.Symbols, Layout: req.Layout}) + if err != nil { return DecodeResult{}, err } + return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil +} + diff --git a/supernode/services/cascade/config.go b/supernode/cascade/config.go similarity index 63% rename from supernode/services/cascade/config.go rename to supernode/cascade/config.go index df6abd1f..c2b63822 100644 --- a/supernode/services/cascade/config.go +++ b/supernode/cascade/config.go @@ -1,12 +1,9 @@ package cascade -import ( - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - // Config contains settings for the cascade service type Config struct { - common.Config `mapstructure:",squash" json:"-"` + // SupernodeAccountAddress is the on-chain account address of this supernode. 
+ SupernodeAccountAddress string `mapstructure:"-" json:"-"` RaptorQServiceAddress string `mapstructure:"-" json:"-"` RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` diff --git a/supernode/cascade/download.go b/supernode/cascade/download.go new file mode 100644 index 00000000..90a3eab9 --- /dev/null +++ b/supernode/cascade/download.go @@ -0,0 +1,250 @@ +package cascade + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "sort" + "time" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/crypto" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" +) + +const targetRequiredPercent = 17 + +type DownloadRequest struct { + ActionID string + Signature string +} + +type DownloadResponse struct { + EventType SupernodeEventType + Message string + FilePath string + DownloadedDir string +} + +func (task *CascadeRegistrationTask) Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) (err error) { + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "download") + } + fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} + logtrace.Info(ctx, "download: request", fields) + + actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to get action", err, fields) + } + logtrace.Info(ctx, "download: action fetched", fields) + task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) + + if actionDetails.GetAction().State != actiontypes.ActionStateDone { + err = errors.New("action is not in a valid state") + fields[logtrace.FieldError] = "action state is not done yet" + fields[logtrace.FieldActionState] = actionDetails.GetAction().State + return task.wrapErr(ctx, "action not finalized yet", err, fields) + } + logtrace.Info(ctx, "download: action state ok", fields) + + metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) + } + logtrace.Info(ctx, "download: metadata decoded", fields) + task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) + + if !metadata.Public { + if req.Signature == "" { + fields[logtrace.FieldError] = "missing signature for private download" + return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) + } + if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to verify download signature", err, fields) + } + logtrace.Info(ctx, "download: signature verified", fields) + } else { + logtrace.Info(ctx, "download: public cascade (no signature)", fields) + } + + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + + logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: 
actionDetails.GetAction().ActionID}) + filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) + if err != nil { + fields[logtrace.FieldError] = err.Error() + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + return task.wrapErr(ctx, "failed to download artifacts", err, fields) + } + logtrace.Debug(ctx, "File reconstructed and hash verified", fields) + task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) + + return nil +} + +func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, tmpDir string) error { + if tmpDir == "" { + return nil + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { + if signature == "" { + return errors.New("signature required") + } + // Fetch the action to get the creator address for verification + act, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return fmt.Errorf("get action for signature verification: %w", err) + } + creator := act.GetAction().Creator + sigBytes, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("invalid base64 signature: %w", err) + } + if err := task.LumeraClient.Verify(ctx, creator, []byte(actionID), sigBytes); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg, filePath, dir string, send func(resp *DownloadResponse) error) { + _ = send(&DownloadResponse{EventType: eventType, Message: msg, FilePath: filePath, DownloadedDir: dir}) +} + +func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { + var layout codec.Layout + var layoutFetchMS, layoutDecodeMS int64 + var layoutAttempts int + + // Retrieve via index IDs + if len(metadata.RqIdsIds) > 0 { + for _, indexID := range metadata.RqIdsIds { + iStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) + indexFile, err := task.P2PClient.Retrieve(ctx, indexID) + if err != nil || len(indexFile) == 0 { + logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) + continue + } + logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) + indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) + if err != nil { + logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) + continue + } + var netMS, decMS int64 + var attempts int + layout, netMS, decMS, attempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) + if err != nil { + logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": attempts}) + continue + } + layoutFetchMS, layoutDecodeMS, layoutAttempts = netMS, decMS, attempts + if len(layout.Blocks) > 0 { + logtrace.Debug(ctx, "layout file retrieved 
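For private cascades, VerifyDownloadSignature above implies a specific client obligation: the download signature must be the action creator's signature over the raw action ID bytes, base64-encoded. A minimal client-side sketch; signFn stands in for whatever signer the caller actually uses (keyring, wallet, etc.) and is not part of this change:

package cascadeexample

import "encoding/base64"

// buildDownloadSignature produces the string the server-side check expects:
// standard base64 of a signature computed over []byte(actionID) with the
// action creator's key. signFn is a hypothetical signer supplied by the caller.
func buildDownloadSignature(actionID string, signFn func(msg []byte) ([]byte, error)) (string, error) {
	raw, err := signFn([]byte(actionID))
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}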
via index", logtrace.Fields{"index_id": indexID, "attempts": attempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) + break + } + } + } + if len(layout.Blocks) == 0 { + return "", "", errors.New("no symbols found in RQ metadata") + } + fields["layout_fetch_ms"], fields["layout_decode_ms"], fields["layout_attempts"] = layoutFetchMS, layoutDecodeMS, layoutAttempts + return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) +} + +func (task *CascadeRegistrationTask) restoreFileFromLayout(ctx context.Context, layout codec.Layout, dataHash string, actionID string, send func(resp *DownloadResponse) error) (string, string, error) { + fields := logtrace.Fields{logtrace.FieldActionID: actionID} + symSet := make(map[string]struct{}) + for _, block := range layout.Blocks { + for _, s := range block.Symbols { + symSet[s] = struct{}{} + } + } + allSymbols := make([]string, 0, len(symSet)) + for s := range symSet { + allSymbols = append(allSymbols, s) + } + sort.Strings(allSymbols) + totalSymbols := len(allSymbols) + fields["totalSymbols"] = totalSymbols + targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 + if targetRequiredCount < 1 && totalSymbols > 0 { + targetRequiredCount = 1 + } + logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) + retrieveStart := time.Now() + reqCount := targetRequiredCount + if reqCount > totalSymbols { + reqCount = totalSymbols + } + rStart := time.Now() + logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) + symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "batch retrieve failed", fields) + return "", "", fmt.Errorf("batch retrieve symbols: %w", err) + } + retrieveMS := time.Since(retrieveStart).Milliseconds() + logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) + decodeStart := time.Now() + dStart := time.Now() + logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) + decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ActionID: actionID, Symbols: symbols, Layout: layout}) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "decode failed", fields) + return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) + } + decodeMS := time.Since(decodeStart).Milliseconds() + logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) + _ = retrieveMS + _ = decodeMS + + // Verify reconstructed file hash matches action metadata + fileHash, herr := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) + if herr != nil { + fields[logtrace.FieldError] = herr.Error() + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", fmt.Errorf("hash file: %w", herr) + } + if fileHash == nil { + fields[logtrace.FieldError] = "file hash is nil" + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", errors.New("file hash is nil") + } + if verr := cascadekit.VerifyB64DataHash(fileHash, dataHash); verr != nil { + fields[logtrace.FieldError] = verr.Error() + 
logtrace.Error(ctx, "failed to verify hash", fields) + return "", decodeInfo.DecodeTmpDir, verr + } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "download: file verified", fields) + // Emit minimal JSON payload (metrics system removed) + info := map[string]interface{}{"action_id": actionID, "found_symbols": len(symbols), "target_percent": targetRequiredPercent} + if b, err := json.Marshal(info); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), decodeInfo.FilePath, decodeInfo.DecodeTmpDir, send) + } + return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil +} diff --git a/supernode/cascade/download_helpers.go b/supernode/cascade/download_helpers.go new file mode 100644 index 00000000..73631549 --- /dev/null +++ b/supernode/cascade/download_helpers.go @@ -0,0 +1,47 @@ +package cascade + +import ( + "context" + "time" + + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// retrieveLayoutFromIDs tries the given layout IDs in order and returns the first valid layout. +func (task *CascadeRegistrationTask) retrieveLayoutFromIDs(ctx context.Context, layoutIDs []string, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + var layout codec.Layout + var netMS, decMS int64 + attempts := 0 + for _, lid := range layoutIDs { + attempts++ + nStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": lid}) + raw, err := task.P2PClient.Retrieve(ctx, lid) + if err != nil || len(raw) == 0 { + logtrace.Warn(ctx, "Retrieve layout failed or empty", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + netMS = time.Since(nStart).Milliseconds() + dStart := time.Now() + // Layout files are stored as compressed RQ metadata: base64(JSON(layout)).signature.counter + // Use the cascadekit parser to decompress and decode instead of JSON-unmarshalling raw bytes. + parsedLayout, _, _, err := cascadekit.ParseRQMetadataFile(raw) + if err != nil { + logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + layout = parsedLayout + decMS = time.Since(dStart).Milliseconds() + if len(layout.Blocks) > 0 { + return layout, netMS, decMS, attempts, nil + } + } + return codec.Layout{}, netMS, decMS, attempts, nil +} + +// retrieveLayoutFromIndex resolves layout IDs in the index file and tries to fetch a valid layout. 
+func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, index cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + return task.retrieveLayoutFromIDs(ctx, index.LayoutIDs, fields) +} diff --git a/supernode/services/cascade/events.go b/supernode/cascade/events.go similarity index 100% rename from supernode/services/cascade/events.go rename to supernode/cascade/events.go diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go new file mode 100644 index 00000000..898d1779 --- /dev/null +++ b/supernode/cascade/helper.go @@ -0,0 +1,163 @@ +package cascade + +import ( + "context" + "encoding/base64" + "strconv" + + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + + sdk "github.com/cosmos/cosmos-sdk/types" + json "github.com/json-iterator/go" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { + res, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return nil, task.wrapErr(ctx, "failed to get action", err, f) + } + if res.GetAction().ActionID == "" { + return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) + } + logtrace.Debug(ctx, "action has been retrieved", f) + return res.GetAction(), nil +} + +func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { + top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) + if err != nil { + return task.wrapErr(ctx, "failed to get top SNs", err, f) + } + logtrace.Debug(ctx, "Fetched Top Supernodes", f) + if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { + addresses := make([]string, len(top.Supernodes)) + for i, sn := range top.Supernodes { + addresses[i] = sn.SupernodeAccount + } + logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses}) + return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f) + } + return nil +} + +func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { + resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) + if err != nil { + return nil, task.wrapErr(ctx, "failed to encode data", err, f) + } + return &resp, nil +} + +func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { + indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) + } + creatorSigBytes, err := 
base64.StdEncoding.DecodeString(creatorSig) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) + } + if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) + } + logtrace.Debug(ctx, "creator signature successfully verified", f) + indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) + } + layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) + } + layoutJSON, err := json.Marshal(encodedMeta) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) + } + layoutB64 := utils.B64Encode(layoutJSON) + if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) + } + logtrace.Debug(ctx, "layout signature successfully verified", f) + return encodedMeta, indexFile.LayoutSignature, nil +} + +func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { + layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate layout files", err, f) + } + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate index files", err, f) + } + allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) 
+ return cascadekit.GenRQIdentifiersFilesResponse{RQIDs: indexIDs, RedundantMetadataFiles: allFiles}, nil +} + +func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + lf := logtrace.Fields{logtrace.FieldActionID: actionID, logtrace.FieldTaskID: task.taskID, "id_files_count": len(idFiles), "symbols_dir": symbolsDir} + for k, v := range f { + lf[k] = v + } + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + logtrace.Info(ctx, "store: first-pass begin", lf) + if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{IDFiles: idFiles, SymbolsDir: symbolsDir, TaskID: task.taskID, ActionID: actionID}, f); err != nil { + return task.wrapErr(ctx, "failed to store artefacts", err, lf) + } + return nil +} + +func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { + if err != nil { + f[logtrace.FieldError] = err.Error() + } + logtrace.Error(ctx, msg, f) + if err != nil { + return status.Errorf(codes.Internal, "%s: %v", msg, err) + } + return status.Errorf(codes.Internal, "%s", msg) +} + +func (task *CascadeRegistrationTask) emitArtefactsStored(ctx context.Context, fields logtrace.Fields, _ codec.Layout, send func(resp *RegisterResponse) error) { + if fields == nil { + fields = logtrace.Fields{} + } + msg := "Artefacts stored" + logtrace.Debug(ctx, "artefacts have been stored", fields) + task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) +} + +func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { + dataSizeInKBs := dataSize / 1024 + fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) + if err != nil { + return task.wrapErr(ctx, "failed to get action fee", err, fields) + } + amount, err := strconv.ParseInt(fee.Amount, 10, 64) + if err != nil { + return task.wrapErr(ctx, "failed to parse fee amount", err, fields) + } + requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) + logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{"fee": requiredFee.String(), "dataBytes": dataSize}) + if action.Price == nil || action.Price.String() != requiredFee.String() { + got := "" + if action.Price != nil { + got = action.Price.String() + } + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), got), fields) + } + return nil +} diff --git a/supernode/services/cascade/interfaces.go b/supernode/cascade/interfaces.go similarity index 53% rename from supernode/services/cascade/interfaces.go rename to supernode/cascade/interfaces.go index e782bc23..31055a17 100644 --- a/supernode/services/cascade/interfaces.go +++ b/supernode/cascade/interfaces.go @@ -1,19 +1,19 @@ package cascade import ( - "context" + "context" ) // CascadeServiceFactory defines an interface to create cascade tasks // //go:generate mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go type CascadeServiceFactory interface { - NewCascadeRegistrationTask() CascadeTask + NewCascadeRegistrationTask() CascadeTask } // CascadeTask interface defines operations for cascade registration and data management type CascadeTask interface { - Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error - Download(ctx context.Context, req *DownloadRequest, send func(resp 
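verifyActionFee keys the on-chain fee query by the data size in whole kilobytes, so the relevant arithmetic is a plain integer division. A small sketch of that step (the comparison against action.Price then uses the ulume amount returned by GetActionFee):

package cascadeexample

import "strconv"

// feeQueryKey mirrors the dataSize/1024 step above: the fee query parameter is
// the payload size in whole kilobytes, truncated. For a 5 MiB upload
// (5*1024*1024 bytes) this yields "5120".
func feeQueryKey(dataSizeBytes int) string {
	return strconv.Itoa(dataSizeBytes / 1024)
}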
*DownloadResponse) error) error - CleanupDownload(ctx context.Context, actionID string) error + Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error + Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error + CleanupDownload(ctx context.Context, actionID string) error } diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go new file mode 100644 index 00000000..74d93c62 --- /dev/null +++ b/supernode/cascade/register.go @@ -0,0 +1,122 @@ +package cascade + +import ( + "context" + "os" + + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// RegisterRequest contains parameters for upload request +type RegisterRequest struct { + TaskID string + ActionID string + DataHash []byte + DataSize int + FilePath string +} + +// RegisterResponse contains the result of upload +type RegisterResponse struct { + EventType SupernodeEventType + Message string + TxHash string +} + +func (task *CascadeRegistrationTask) Register( + ctx context.Context, + req *RegisterRequest, + send func(resp *RegisterResponse) error, +) (err error) { + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID + } + + fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} + logtrace.Info(ctx, "register: request", fields) + defer func() { + if req != nil && req.FilePath != "" { + if remErr := os.RemoveAll(req.FilePath); remErr != nil { + logtrace.Warn(ctx, "Failed to remove uploaded file", fields) + } else { + logtrace.Debug(ctx, "Uploaded file cleaned up", fields) + } + } + }() + + action, err := task.fetchAction(ctx, req.ActionID, fields) + if err != nil { return err } + fields[logtrace.FieldBlockHeight] = action.BlockHeight + fields[logtrace.FieldCreator] = action.Creator + fields[logtrace.FieldStatus] = action.State + fields[logtrace.FieldPrice] = action.Price + logtrace.Info(ctx, "register: action fetched", fields) + task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) + + if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } + logtrace.Info(ctx, "register: fee verified", fields) + task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) + + fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress + if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } + logtrace.Info(ctx, "register: top supernode confirmed", fields) + task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) + + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) + if err != nil { return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) } + logtrace.Info(ctx, "register: metadata decoded", fields) + task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) + + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { return err } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "register: data hash matched", fields) + task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) + + encResp, err := task.encodeInput(ctx, req.ActionID, 
req.FilePath, req.DataSize, fields) + if err != nil { return err } + fields["symbols_dir"] = encResp.SymbolsDir + logtrace.Info(ctx, "register: input encoded", fields) + task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) + + layout, signature, err := task.verifySignatureAndDecodeLayout(ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields) + if err != nil { return err } + logtrace.Info(ctx, "register: signature verified", fields) + task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) + + rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) + if err != nil { return err } + fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) + logtrace.Info(ctx, "register: rqid files generated", fields) + task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) + + if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } + logtrace.Info(ctx, "register: rqids validated", fields) + task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) + + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize simulation failed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } + logtrace.Info(ctx, "register: finalize simulation passed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) + + if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize action error", fields) + return task.wrapErr(ctx, "failed to finalize action", err, fields) + } + txHash := resp.TxResponse.TxHash + fields[logtrace.FieldTxHash] = txHash + logtrace.Info(ctx, "register: action finalized", fields) + task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) + return nil +} diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go new file mode 100644 index 00000000..49dde1f1 --- /dev/null +++ b/supernode/cascade/service.go @@ -0,0 +1,44 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" +) + +type CascadeService struct { + config *Config + + LumeraClient adaptors.LumeraClient + P2P adaptors.P2PService + RQ adaptors.CodecService + P2PClient p2p.Client +} + +// Compile-time checks to ensure CascadeService implements required interfaces +var _ CascadeServiceFactory = (*CascadeService)(nil) + +// NewCascadeRegistrationTask creates a new task for cascade registration +func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { + task := NewCascadeRegistrationTask(service) + return task +} 
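Putting the pieces together, a hedged end-to-end sketch of how a transport handler is expected to drive the new package: construct the service from already-initialized dependencies, obtain a task from the factory, and stream progress through the send callback. All literal values below are placeholders:

package cascadeexample

import (
	"context"
	"fmt"

	"github.com/LumeraProtocol/supernode/v2/p2p"
	"github.com/LumeraProtocol/supernode/v2/pkg/codec"
	"github.com/LumeraProtocol/supernode/v2/pkg/lumera"
	"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore"
	"github.com/LumeraProtocol/supernode/v2/supernode/cascade"
)

// runRegistration wires a CascadeService and runs a single registration task,
// printing each streamed event. Dependencies are assumed to be initialized by
// the caller (as cmd/start.go does).
func runRegistration(ctx context.Context, lc lumera.Client, pc p2p.Client, cdc codec.Codec, rs rqstore.Store) error {
	svc := cascade.NewCascadeService(
		&cascade.Config{SupernodeAccountAddress: "lumera1...", RqFilesDir: "/var/lib/supernode/rq"},
		lc, pc, cdc, rs,
	)
	task := svc.NewCascadeRegistrationTask()
	req := &cascade.RegisterRequest{
		TaskID:   "task-1",
		ActionID: "action-1",
		DataSize: 1024,
		FilePath: "/tmp/upload.bin",
	}
	return task.Register(ctx, req, func(resp *cascade.RegisterResponse) error {
		fmt.Printf("event=%v msg=%q tx=%s\n", resp.EventType, resp.Message, resp.TxHash)
		return nil
	})
}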
+ +// Run starts the service (no background workers) +func (service *CascadeService) Run(ctx context.Context) error { <-ctx.Done(); return nil } + +// NewCascadeService returns a new CascadeService instance +func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { + return &CascadeService{ + config: config, + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + P2PClient: p2pClient, + } +} + diff --git a/supernode/cascade/task.go b/supernode/cascade/task.go new file mode 100644 index 00000000..6d466e57 --- /dev/null +++ b/supernode/cascade/task.go @@ -0,0 +1,38 @@ +package cascade + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" +) + +// CascadeRegistrationTask is the task for cascade registration +type CascadeRegistrationTask struct { + *CascadeService + + Asset *files.File + dataHash string + creatorSignature []byte + taskID string +} + +const ( + logPrefix = "cascade" +) + +// Compile-time check to ensure CascadeRegistrationTask implements CascadeTask interface +var _ CascadeTask = (*CascadeRegistrationTask)(nil) + +func (task *CascadeRegistrationTask) removeArtifacts() { + if task.Asset != nil { + _ = task.Asset.Remove() + } +} + +// NewCascadeRegistrationTask returns a new Task instance +func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { + return &CascadeRegistrationTask{CascadeService: service} +} + +// streamEvent sends a RegisterResponse via the provided callback. +func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { + _ = send(&RegisterResponse{EventType: eventType, Message: msg, TxHash: txHash}) +} diff --git a/supernode/cmd/init.go b/supernode/cmd/init.go index 6412d848..a9d01cb5 100644 --- a/supernode/cmd/init.go +++ b/supernode/cmd/init.go @@ -15,7 +15,7 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - consmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" ) @@ -36,15 +36,7 @@ var ( passphraseFile string ) -// Default configuration values -const ( - DefaultKeyringBackend = "test" - DefaultKeyName = "test-key" - DefaultSupernodeAddr = "0.0.0.0" - DefaultSupernodePort = 4444 - DefaultLumeraGRPC = "localhost:9090" - DefaultChainID = "testing" -) +// Default configuration values centralized in config package // InitInputs holds all user inputs for initialization type InitInputs struct { @@ -221,7 +213,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5a: Determine keyring backend (how keys are stored securely) // Options: 'test' (unencrypted), 'file' (encrypted file), 'os' (system keyring) - backend := DefaultKeyringBackend + backend := config.DefaultKeyringBackend if keyringBackendFlag != "" { backend = keyringBackendFlag } @@ -233,7 +225,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5b: Set the name for the cryptographic key // This name is used to reference the key in the keyring - keyName := DefaultKeyName + keyName := config.DefaultKeyName if keyNameFlag != "" { keyName = keyNameFlag @@ -245,7 +237,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5c: Configure the supernode's network binding address // Determines which network interface 
the supernode will listen on - supernodeAddr := DefaultSupernodeAddr + supernodeAddr := config.DefaultSupernodeHost if supernodeAddrFlag != "" { supernodeAddr = supernodeAddrFlag @@ -256,7 +248,7 @@ func gatherUserInputs() (InitInputs, error) { } // Step 5d: Set the port for supernode peer-to-peer communication - supernodePort := DefaultSupernodePort + supernodePort := int(config.DefaultSupernodePort) if supernodePortFlag != 0 { supernodePort = supernodePortFlag @@ -268,7 +260,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5e: Configure connection to the Lumera blockchain node // This is the GRPC endpoint for blockchain interactions - lumeraGRPC := DefaultLumeraGRPC + lumeraGRPC := config.DefaultLumeraGRPC if lumeraGrpcFlag != "" { lumeraGRPC = lumeraGrpcFlag @@ -280,7 +272,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5f: Set the blockchain network identifier // Must match the chain ID of the Lumera network you're connecting to - chainID := DefaultChainID + chainID := config.DefaultChainID if chainIDFlag != "" { chainID = chainIDFlag } @@ -419,7 +411,7 @@ func setupKeyring(keyName string, shouldRecover bool, mnemonic string) (string, } // recoverExistingKey handles the recovery of an existing key from mnemonic -func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (string, error) { +func recoverExistingKey(kr cKeyring.Keyring, keyName, mnemonic string) (string, error) { // Process and validate mnemonic using helper function processedMnemonic, err := processAndValidateMnemonic(mnemonic) if err != nil { @@ -444,7 +436,7 @@ func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (st } // createNewKey handles the creation of a new key -func createNewKey(kr consmoskeyring.Keyring, keyName string) (string, string, error) { +func createNewKey(kr cKeyring.Keyring, keyName string) (string, string, error) { // Generate mnemonic and create new account keyMnemonic, _, err := keyring.CreateNewAccount(kr, keyName) if err != nil { @@ -497,7 +489,7 @@ func promptKeyringBackend(passedBackend string) (string, error) { } backend = passedBackend } else { - backend = DefaultKeyringBackend + backend = config.DefaultKeyringBackend } prompt := &survey.Select{ Message: "Choose keyring backend:", @@ -565,24 +557,24 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC if passedAddrs != "" { supernodeAddr = passedAddrs } else { - supernodeAddr = DefaultSupernodeAddr + supernodeAddr = config.DefaultSupernodeHost } var port string if passedPort != 0 { port = fmt.Sprintf("%d", passedPort) } else { - port = fmt.Sprintf("%d", DefaultSupernodePort) + port = fmt.Sprintf("%d", config.DefaultSupernodePort) } if passedGRPC != "" { lumeraGrpcAddr = passedGRPC } else { - lumeraGrpcAddr = DefaultLumeraGRPC + lumeraGrpcAddr = config.DefaultLumeraGRPC } if passedChainID != "" { chainID = passedChainID } else { - chainID = DefaultChainID + chainID = config.DefaultChainID } // Supernode IP address @@ -618,7 +610,6 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC return "", 0, "", "", fmt.Errorf("invalid supernode port: %s", portStr) } - // Lumera GRPC address (full address with port) lumeraPrompt := &survey.Input{ Message: "Enter Lumera GRPC address:", diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go index 8cd8708f..466eb657 100644 --- a/supernode/cmd/service.go +++ b/supernode/cmd/service.go @@ -1,33 +1,34 @@ package cmd import ( - "context" - "reflect" + "context" + "reflect" - 
"github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) type service interface { - Run(context.Context) error + Run(context.Context) error } func RunServices(ctx context.Context, services ...service) error { - group, ctx := errgroup.WithContext(ctx) - - for _, service := range services { - service := service - - group.Go(func() error { - err := service.Run(ctx) - if err != nil { - logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) - } else { - logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) - } - return err - }) - } - - return group.Wait() + group, ctx := errgroup.WithContext(ctx) + + for _, service := range services { + service := service + + group.Go(func() error { + err := service.Run(ctx) + if err != nil { + logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) + } else { + logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) + } + return err + }) + } + + return group.Wait() } + diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 97569f2b..e1af0616 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "syscall" + "time" "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/p2p/kademlia/store/cloud" @@ -17,18 +18,22 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + grpcserver "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/server" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/LumeraProtocol/supernode/v2/supernode/node/action/server/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/server" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - supernodeService "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/verifier" + statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" + cascadeRPC "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/cascade" + server "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/status" + "github.com/LumeraProtocol/supernode/v2/supernode/verifier" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" + + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + + pbsupernode "github.com/LumeraProtocol/supernode/v2/gen/supernode" ) // startCmd represents the start command @@ -43,6 +48,9 @@ The supernode will connect to the Lumera network and begin participating in the // Create context with correlation ID for tracing ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") + // Make the 
context cancelable for graceful shutdown + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Log configuration info cfgFile := filepath.Join(baseDir, DefaultConfigFile) @@ -98,47 +106,40 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) } - // Initialize the supernode - supernodeInstance, err := NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) - if err != nil { - logtrace.Fatal(ctx, "Failed to initialize supernode", logtrace.Fields{"error": err.Error()}) - } + // Supernode wrapper removed; components are managed directly // Configure cascade service cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - }, + &cascadeService.Config{SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, RqFilesDir: appConfig.GetRaptorQFilesDir()}, lumeraClient, - *p2pService, + p2pService, codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), rqStore, ) // Create cascade action server - cascadeActionServer := cascade.NewCascadeActionServer(cService) + cascadeActionServer := cascadeRPC.NewCascadeActionServer(cService) // Set the version in the status service package - supernodeService.Version = Version + statusService.Version = Version // Create supernode status service - statusService := supernodeService.NewSupernodeStatusService(*p2pService, lumeraClient, appConfig) + statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig) // Create supernode server - supernodeServer := server.NewSupernodeServer(statusService) - - // Configure server - serverConfig := &server.Config{ - Identity: appConfig.SupernodeConfig.Identity, - ListenAddresses: appConfig.SupernodeConfig.Host, - Port: int(appConfig.SupernodeConfig.Port), - } - - // Create gRPC server - grpcServer, err := server.New(serverConfig, "service", kr, lumeraClient, cascadeActionServer, supernodeServer) + supernodeServer := server.NewSupernodeServer(statusSvc) + + // Create gRPC server (explicit args, no config struct) + grpcServer, err := server.New( + appConfig.SupernodeConfig.Identity, + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.Port), + "service", + kr, + lumeraClient, + grpcserver.ServiceDesc{Desc: &pbcascade.CascadeService_ServiceDesc, Service: cascadeActionServer}, + grpcserver.ServiceDesc{Desc: &pbsupernode.SupernodeService_ServiceDesc, Service: supernodeServer}, + ) if err != nil { logtrace.Fatal(ctx, "Failed to create gRPC server", logtrace.Fields{"error": err.Error()}) } @@ -168,24 +169,52 @@ The supernode will connect to the Lumera network and begin participating in the }() } - // Start the services - go func() { - if err := RunServices(ctx, grpcServer, cService, *p2pService, gatewayServer); err != nil { - logtrace.Error(ctx, "Service error", logtrace.Fields{"error": err.Error()}) - } - }() + // Start the services using the standard runner and capture exit + servicesErr := make(chan error, 1) + go func() { servicesErr <- RunServices(ctx, grpcServer, cService, p2pService, gatewayServer) }() // Set up signal handling for graceful shutdown sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - // Wait for termination signal - sig := <-sigCh - logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + // Wait for 
either a termination signal or service exit + var triggeredBySignal bool + var runErr error + select { + case sig := <-sigCh: + triggeredBySignal = true + logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + case runErr = <-servicesErr: + if runErr != nil { + logtrace.Error(ctx, "Service error", logtrace.Fields{"error": runErr.Error()}) + } else { + logtrace.Debug(ctx, "Services exited", logtrace.Fields{}) + } + } + + // Cancel context to signal all services + cancel() - // Graceful shutdown - if err := supernodeInstance.Stop(ctx); err != nil { - logtrace.Error(ctx, "Error during shutdown", logtrace.Fields{"error": err.Error()}) + // Stop HTTP gateway and gRPC servers gracefully + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() + + if err := gatewayServer.Stop(shutdownCtx); err != nil { + logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) + } + grpcServer.Close() + + // Close Lumera client (preserve original log messages) + logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) + if err := lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) + } + + // If we triggered shutdown by signal, wait for services to drain + if triggeredBySignal { + if err := <-servicesErr; err != nil { + logtrace.Error(ctx, "Service error on shutdown", logtrace.Fields{"error": err.Error()}) + } } return nil @@ -197,7 +226,7 @@ func init() { } // initP2PService initializes the P2P service -func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (p2p.P2P, error) { // Get the supernode address from the keyring keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) if err != nil { @@ -218,5 +247,44 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } - return &p2pService, nil + return p2pService, nil +} + +// initLumeraClient initializes the Lumera client based on configuration +func initLumeraClient(ctx context.Context, config *config.Config, kr cKeyring.Keyring) (lumera.Client, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) + if err != nil { + return nil, fmt.Errorf("failed to create Lumera config: %w", err) + } + return lumera.NewClient( + ctx, + lumeraConfig, + ) +} + +// initRQStore initializes the RaptorQ store for Cascade processing +func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Create RaptorQ store directory if it doesn't exist + rqDir := config.GetRaptorQFilesDir() + "/rq" + if err := os.MkdirAll(rqDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create RQ store directory: %w", err) + } + + // Create the SQLite file path + rqStoreFile := rqDir + "/rqstore.db" + + logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ + "file_path": 
rqStoreFile, + }) + + // Initialize RaptorQ store with SQLite + return rqstore.NewSQLiteRQStore(rqStoreFile) } diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go deleted file mode 100644 index c0740fd0..00000000 --- a/supernode/cmd/supernode.go +++ /dev/null @@ -1,140 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" -) - -// Supernode represents a supernode in the Lumera network -type Supernode struct { - config *config.Config - lumeraClient lumera.Client - p2pService p2p.P2P - keyring keyring.Keyring - rqStore rqstore.Store - keyName string // String that represents the supernode account in keyring -} - -// NewSupernode creates a new supernode instance -func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring, - p2pClient *p2p.P2P, rqStore rqstore.Store, lumeraClient lumera.Client) (*Supernode, error) { - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - supernode := &Supernode{ - config: config, - lumeraClient: lumeraClient, - keyring: kr, - rqStore: rqStore, - p2pService: *p2pClient, - keyName: config.SupernodeConfig.KeyName, - } - - return supernode, nil -} - -// Start starts all supernode services -func (s *Supernode) Start(ctx context.Context) error { - // Verify that the key specified in config exists - keyInfo, err := s.keyring.Key(s.config.SupernodeConfig.KeyName) - if err != nil { - logtrace.Error(ctx, "Key not found in keyring", logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "error": err.Error(), - }) - - // Provide helpful guidance - fmt.Printf("\nError: Key '%s' not found in keyring at %s\n", - s.config.SupernodeConfig.KeyName, s.config.GetKeyringDir()) - fmt.Println("\nPlease create the key first with one of these commands:") - fmt.Printf(" supernode keys add %s\n", s.config.SupernodeConfig.KeyName) - fmt.Printf(" supernode keys recover %s\n", s.config.SupernodeConfig.KeyName) - return fmt.Errorf("key not found") - } - - // Get the account address for logging - address, err := keyInfo.GetAddress() - if err != nil { - logtrace.Error(ctx, "Failed to get address from key", logtrace.Fields{ - "error": err.Error(), - }) - return err - } - - logtrace.Debug(ctx, "Found valid key in keyring", logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "address": address.String(), - }) - - // Use the P2P service that was passed in via constructor - logtrace.Debug(ctx, "Starting P2P service", logtrace.Fields{}) - if err := s.p2pService.Run(ctx); err != nil { - return fmt.Errorf("p2p service error: %w", err) - } - - return nil -} - -// Stop stops all supernode services -func (s *Supernode) Stop(ctx context.Context) error { - // Close the Lumera client connection - if s.lumeraClient != nil { - logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) - if err := s.lumeraClient.Close(); err != nil { - logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{ - "error": err.Error(), - }) - } - } - - return nil -} - -// initLumeraClient initializes the Lumera client based on configuration -func initLumeraClient(ctx context.Context, config *config.Config, kr keyring.Keyring) (lumera.Client, error) { - if config == nil { - return nil, fmt.Errorf("config 
is nil") - } - - lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) - if err != nil { - return nil, fmt.Errorf("failed to create Lumera config: %w", err) - } - return lumera.NewClient( - ctx, - lumeraConfig, - ) -} - -// initRQStore initializes the RaptorQ store for Cascade processing -func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Create RaptorQ store directory if it doesn't exist - rqDir := config.GetRaptorQFilesDir() + "/rq" - if err := os.MkdirAll(rqDir, 0700); err != nil { - return nil, fmt.Errorf("failed to create RQ store directory: %w", err) - } - - // Create the SQLite file path - rqStoreFile := rqDir + "/rqstore.db" - - logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ - "file_path": rqStoreFile, - }) - - // Initialize RaptorQ store with SQLite - return rqstore.NewSQLiteRQStore(rqStoreFile) -} diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go new file mode 100644 index 00000000..1db1c3a3 --- /dev/null +++ b/supernode/config/defaults.go @@ -0,0 +1,16 @@ +package config + +// Centralized default values for configuration + +const ( + DefaultKeyringBackend = "test" + DefaultKeyringDir = "keys" + DefaultKeyName = "test-key" + DefaultSupernodeHost = "0.0.0.0" + DefaultSupernodePort = 4444 + DefaultP2PPort = 4445 + DefaultLumeraGRPC = "localhost:9090" + DefaultChainID = "testing" + DefaultRaptorQFilesDir = "raptorq_files" +) + diff --git a/supernode/config/save.go b/supernode/config/save.go index 5199fb81..d93e6cb8 100644 --- a/supernode/config/save.go +++ b/supernode/config/save.go @@ -32,21 +32,21 @@ func SaveConfig(config *Config, filename string) error { // CreateDefaultConfig creates a default configuration with the specified values func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyringDir string, passPlain, passEnv, passFile string) *Config { - // Set default values if keyringBackend == "" { - keyringBackend = "test" + keyringBackend = DefaultKeyringBackend } if keyringDir == "" { - keyringDir = "keys" + keyringDir = DefaultKeyringDir + } + if keyName == "" { + keyName = DefaultKeyName + } + if chainID == "" { + chainID = DefaultChainID } return &Config{ - SupernodeConfig: SupernodeConfig{ - KeyName: keyName, - Identity: identity, - Host: "0.0.0.0", - Port: 4444, - }, + SupernodeConfig: SupernodeConfig{KeyName: keyName, Identity: identity, Host: DefaultSupernodeHost, Port: DefaultSupernodePort}, KeyringConfig: KeyringConfig{ Backend: keyringBackend, Dir: keyringDir, @@ -54,16 +54,8 @@ func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyr PassEnv: passEnv, PassFile: passFile, }, - P2PConfig: P2PConfig{ - Port: 4445, - DataDir: "data/p2p", - }, - LumeraClientConfig: LumeraClientConfig{ - GRPCAddr: "localhost:9090", - ChainID: chainID, - }, - RaptorQConfig: RaptorQConfig{ - FilesDir: "raptorq_files", - }, + P2PConfig: P2PConfig{Port: DefaultP2PPort, DataDir: "data/p2p"}, + LumeraClientConfig: LumeraClientConfig{GRPCAddr: DefaultLumeraGRPC, ChainID: chainID}, + RaptorQConfig: RaptorQConfig{FilesDir: DefaultRaptorQFilesDir}, } } diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go deleted file mode 100644 index 6a38b750..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ 
/dev/null @@ -1,367 +0,0 @@ -package cascade - -import ( - "encoding/hex" - "fmt" - "io" - "os" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - - "google.golang.org/grpc" -) - -type ActionServer struct { - pb.UnimplementedCascadeServiceServer - factory cascadeService.CascadeServiceFactory -} - -// NewCascadeActionServer creates a new CascadeActionServer with injected service -func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { - return &ActionServer{factory: factory} -} - -// calculateOptimalChunkSize returns an optimal chunk size based on file size -// to balance throughput and memory usage -func calculateOptimalChunkSize(fileSize int64) int { - const ( - minChunkSize = 64 * 1024 // 64 KB minimum - maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files - smallFileThreshold = 1024 * 1024 // 1 MB - mediumFileThreshold = 50 * 1024 * 1024 // 50 MB - largeFileThreshold = 500 * 1024 * 1024 // 500 MB - ) - - var chunkSize int - - switch { - case fileSize <= smallFileThreshold: - // For small files (up to 1MB), use 64KB chunks - chunkSize = minChunkSize - case fileSize <= mediumFileThreshold: - // For medium files (1MB-50MB), use 256KB chunks - chunkSize = 256 * 1024 - case fileSize <= largeFileThreshold: - // For large files (50MB-500MB), use 1MB chunks - chunkSize = 1024 * 1024 - default: - // For very large files (500MB+), use 4MB chunks for optimal throughput - chunkSize = maxChunkSize - } - - // Ensure chunk size is within bounds - if chunkSize < minChunkSize { - chunkSize = minChunkSize - } - if chunkSize > maxChunkSize { - chunkSize = maxChunkSize - } - - return chunkSize -} - -func (server *ActionServer) Desc() *grpc.ServiceDesc { - return &pb.CascadeService_ServiceDesc -} - -func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Register", - logtrace.FieldModule: "CascadeActionServer", - } - - ctx := stream.Context() - logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) - - const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit - - var ( - metadata *pb.Metadata - totalSize int - ) - - hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) - return fmt.Errorf("initializing hasher and temp file: %w", err) - } - defer func(tempFile *os.File) { - err := tempFile.Close() - if err != nil && !errors.Is(err, os.ErrClosed) { - fields[logtrace.FieldError] = err.Error() - logtrace.Warn(ctx, "error closing temp file", fields) - } - }(tempFile) - - // Process incoming stream - for { - req, err := stream.Recv() - if err == io.EOF { - // End of stream - break - } - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "error receiving stream data", fields) - return fmt.Errorf("failed to receive stream data: %w", err) - } - - // Check which type of message we received - switch x := req.RequestType.(type) { - case *pb.RegisterRequest_Chunk: - if x.Chunk != nil { - - // hash the chunks - _, err := hasher.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to 
hasher", fields) - return fmt.Errorf("hashing error: %w", err) - } - - // write chunks to the file - _, err = tempFile.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to file", fields) - return fmt.Errorf("file write error: %w", err) - } - totalSize += len(x.Chunk.Data) - - // Validate total size doesn't exceed limit - if totalSize > maxFileSize { - fields[logtrace.FieldError] = "file size exceeds 1GB limit" - fields["total_size"] = totalSize - logtrace.Error(ctx, "upload rejected: file too large", fields) - return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) - } - - logtrace.Debug(ctx, "received data chunk", logtrace.Fields{ - "chunk_size": len(x.Chunk.Data), - "total_size_so_far": totalSize, - }) - } - case *pb.RegisterRequest_Metadata: - // Store metadata - this should be the final message - metadata = x.Metadata - logtrace.Debug(ctx, "received metadata", logtrace.Fields{ - "task_id": metadata.TaskId, - "action_id": metadata.ActionId, - }) - } - } - - // Verify we received metadata - if metadata == nil { - logtrace.Error(ctx, "no metadata received in stream", fields) - return fmt.Errorf("no metadata received") - } - fields[logtrace.FieldTaskID] = metadata.GetTaskId() - fields[logtrace.FieldActionID] = metadata.GetActionId() - logtrace.Debug(ctx, "metadata received from action-sdk", fields) - - // Ensure all data is written to disk before calculating hash - if err := tempFile.Sync(); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to sync temp file", fields) - return fmt.Errorf("failed to sync temp file: %w", err) - } - - hash := hasher.Sum(nil) - hashHex := hex.EncodeToString(hash) - fields[logtrace.FieldHashHex] = hashHex - logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) - - targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) - return fmt.Errorf("failed to replace temp dir with task dir: %w", err) - } - - // Process the complete data - task := server.factory.NewCascadeRegistrationTask() - err = task.Register(ctx, &cascadeService.RegisterRequest{ - TaskID: metadata.TaskId, - ActionID: metadata.ActionId, - DataHash: hash, - DataSize: totalSize, - FilePath: targetPath, - }, func(resp *cascadeService.RegisterResponse) error { - grpcResp := &pb.RegisterResponse{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - TxHash: resp.TxHash, - } - if err := stream.Send(grpcResp); err != nil { - logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - return nil - }) - - if err != nil { - logtrace.Error(ctx, "registration task failed", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return fmt.Errorf("registration failed: %w", err) - } - - logtrace.Debug(ctx, "cascade registration completed successfully", fields) - return nil -} - -func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Download", - logtrace.FieldModule: "CascadeActionServer", - logtrace.FieldActionID: req.GetActionId(), - } - - ctx := stream.Context() - logtrace.Debug(ctx, "download request received from client", fields) - - task := server.factory.NewCascadeRegistrationTask() - - // 
Authorization is enforced inside the task based on metadata.Public. - // If public, signature is skipped; if private, signature is required. - - var restoredFilePath string - var tmpDir string - - // Ensure tmpDir is cleaned up even if errors occur after retrieval - defer func() { - if tmpDir != "" { - if err := task.CleanupDownload(ctx, tmpDir); err != nil { - logtrace.Error(ctx, "error cleaning up the tmp dir", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - logtrace.Debug(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir}) - } - } - }() - - err := task.Download(ctx, &cascadeService.DownloadRequest{ - ActionID: req.GetActionId(), - Signature: req.GetSignature(), - }, func(resp *cascadeService.DownloadResponse) error { - grpcResp := &pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - }, - }, - } - - if resp.FilePath != "" { - restoredFilePath = resp.FilePath - tmpDir = resp.DownloadedDir - } - - return stream.Send(grpcResp) - }) - - if err != nil { - logtrace.Error(ctx, "error occurred during download process", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - - if restoredFilePath == "" { - logtrace.Error(ctx, "no artefact file retrieved", fields) - return fmt.Errorf("no artefact to stream") - } - logtrace.Debug(ctx, "streaming artefact file in chunks", fields) - - // Open the restored file and stream directly from disk to avoid buffering entire file in memory - f, err := os.Open(restoredFilePath) - if err != nil { - logtrace.Error(ctx, "failed to open restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - logtrace.Error(ctx, "failed to stat restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Calculate optimal chunk size based on file size - chunkSize := calculateOptimalChunkSize(fi.Size()) - logtrace.Debug(ctx, "calculated optimal chunk size for download", logtrace.Fields{ - "file_size": fi.Size(), - "chunk_size": chunkSize, - }) - - // Pre-read first chunk to avoid any delay between SERVE_READY and first data - buf := make([]byte, chunkSize) - n, readErr := f.Read(buf) - if readErr != nil && readErr != io.EOF { - return fmt.Errorf("chunked read failed: %w", readErr) - } - - // Announce: file is ready to be served to the client (right before first data) - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType_SERVE_READY, - Message: "File available for download", - }, - }, - }); err != nil { - logtrace.Error(ctx, "failed to send serve-ready event", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Send pre-read first chunk if available - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: &pb.DataChunk{Data: buf[:n]}, - }, - }); err != nil { - logtrace.Error(ctx, "failed to stream first chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - } - - // If EOF after first read, we're done - if readErr == io.EOF { - logtrace.Debug(ctx, "completed streaming all chunks", fields) - return nil - } - - // Continue streaming remaining chunks - for { - n, readErr = f.Read(buf) - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: 
&pb.DataChunk{Data: buf[:n]}, - }, - }); err != nil { - logtrace.Error(ctx, "failed to stream chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - } - if readErr == io.EOF { - break - } - if readErr != nil { - return fmt.Errorf("chunked read failed: %w", readErr) - } - } - - // Cleanup is handled in deferred block above - - logtrace.Debug(ctx, "completed streaming all chunks", fields) - return nil -} diff --git a/supernode/node/action/server/cascade/cascade_action_server_mock.go b/supernode/node/action/server/cascade/cascade_action_server_mock.go deleted file mode 100644 index 3113dcb3..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server_mock.go +++ /dev/null @@ -1,41 +0,0 @@ -package cascade - -import ( - "context" - "io" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "google.golang.org/grpc/metadata" -) - -// mockStream simulates pb.CascadeService_RegisterServer -type mockStream struct { - ctx context.Context - request []*pb.RegisterRequest - sent []*pb.RegisterResponse - pos int -} - -func (m *mockStream) Context() context.Context { - return m.ctx -} - -func (m *mockStream) Send(resp *pb.RegisterResponse) error { - m.sent = append(m.sent, resp) - return nil -} - -func (m *mockStream) Recv() (*pb.RegisterRequest, error) { - if m.pos >= len(m.request) { - return nil, io.EOF - } - req := m.request[m.pos] - m.pos++ - return req, nil -} - -func (m *mockStream) SetHeader(md metadata.MD) error { return nil } -func (m *mockStream) SendHeader(md metadata.MD) error { return nil } -func (m *mockStream) SetTrailer(md metadata.MD) {} -func (m *mockStream) SendMsg(_ any) error { return nil } -func (m *mockStream) RecvMsg(_ any) error { return nil } diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go deleted file mode 100644 index ff2738b3..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package cascade - -import ( - "context" - "errors" - "testing" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks" - - "github.com/stretchr/testify/assert" - "github.com/golang/mock/gomock" -) - -func TestRegister_Success(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - - // Expect Register to be called with any input, respond via callback - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - return send(&cascade.RegisterResponse{ - EventType: 1, - Message: "registration successful", - TxHash: "tx123", - }) - }, - ).Times(1) - - mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.NoError(t, err) - assert.Len(t, 
stream.sent, 1) - assert.Equal(t, "registration successful", stream.sent[0].Message) - assert.Equal(t, "tx123", stream.sent[0].TxHash) -} - -func TestRegister_Error_NoMetadata(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "no metadata received") -} - -func TestRegister_Error_TaskFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("task failed")).Times(1) - mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "registration failed: task failed") -} diff --git a/supernode/node/action/server/cascade/helper.go b/supernode/node/action/server/cascade/helper.go deleted file mode 100644 index ec005707..00000000 --- a/supernode/node/action/server/cascade/helper.go +++ /dev/null @@ -1,39 +0,0 @@ -package cascade - -import ( - "fmt" - "lukechampine.com/blake3" - "os" - "path/filepath" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -func initializeHasherAndTempFile() (*blake3.Hasher, *os.File, string, error) { - hasher := blake3.New(32, nil) - - // Create a unique temp file to avoid collisions across concurrent calls - tempFile, err := os.CreateTemp("", "cascade-upload-*") - if err != nil { - return nil, nil, "", fmt.Errorf("could not create temp file: %w", err) - } - - return hasher, tempFile, tempFile.Name(), nil -} - -func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (targetPath string, err error) { - if err := tempFile.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - return "", fmt.Errorf("failed to close temp file: %w", err) - } - - targetDir := filepath.Join(os.TempDir(), taskID) - if err := os.MkdirAll(targetDir, 0755); err != nil { - return "", fmt.Errorf("could not create task directory: %w", err) - } - targetPath = filepath.Join(targetDir, fmt.Sprintf("uploaded-%s.dat", taskID)) - if err := os.Rename(tempFilePath, targetPath); err != nil { - return "", fmt.Errorf("could not move file to final location: %w", err) - } - - return targetPath, nil -} diff --git a/supernode/node/supernode/gateway/swagger.json b/supernode/node/supernode/gateway/swagger.json deleted file mode 100644 index 8e8b66fe..00000000 --- a/supernode/node/supernode/gateway/swagger.json +++ /dev/null @@ -1,279 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "supernode/service.proto", - "version": "version not set" - }, - "tags": [ - { - "name": "SupernodeService" - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/api/v1/services": { - "get": { - "operationId": 
"SupernodeService_ListServices", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeListServicesResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "tags": [ - "SupernodeService" - ] - } - }, - "/api/v1/status": { - "get": { - "operationId": "SupernodeService_GetStatus", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "parameters": [ - { - "name": "includeP2pMetrics", - "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "SupernodeService" - ] - } - } - }, - "definitions": { - "ResourcesCPU": { - "type": "object", - "properties": { - "usagePercent": { - "type": "number", - "format": "double", - "title": "CPU usage percentage (0-100)" - }, - "cores": { - "type": "integer", - "format": "int32", - "title": "Number of CPU cores" - } - } - }, - "ResourcesMemory": { - "type": "object", - "properties": { - "totalGb": { - "type": "number", - "format": "double", - "title": "Total memory in GB" - }, - "usedGb": { - "type": "number", - "format": "double", - "title": "Used memory in GB" - }, - "availableGb": { - "type": "number", - "format": "double", - "title": "Available memory in GB" - }, - "usagePercent": { - "type": "number", - "format": "double", - "title": "Memory usage percentage (0-100)" - } - } - }, - "ResourcesStorage": { - "type": "object", - "properties": { - "path": { - "type": "string", - "title": "Storage path being monitored" - }, - "totalBytes": { - "type": "string", - "format": "uint64" - }, - "usedBytes": { - "type": "string", - "format": "uint64" - }, - "availableBytes": { - "type": "string", - "format": "uint64" - }, - "usagePercent": { - "type": "number", - "format": "double", - "title": "Storage usage percentage (0-100)" - } - } - }, - "StatusResponseNetwork": { - "type": "object", - "properties": { - "peersCount": { - "type": "integer", - "format": "int32", - "title": "Number of connected peers in P2P network" - }, - "peerAddresses": { - "type": "array", - "items": { - "type": "string" - }, - "title": "List of connected peer addresses (optional, may be empty for privacy)" - } - }, - "title": "Network information" - }, - "StatusResponseResources": { - "type": "object", - "properties": { - "cpu": { - "$ref": "#/definitions/ResourcesCPU" - }, - "memory": { - "$ref": "#/definitions/ResourcesMemory" - }, - "storageVolumes": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/ResourcesStorage" - } - }, - "hardwareSummary": { - "type": "string", - "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" - } - }, - "title": "System resource information" - }, - - "protobufAny": { - "type": "object", - "properties": { - "@type": { - "type": "string" - } - }, - "additionalProperties": {} - }, - "rpcStatus": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/protobufAny" - } - } - } - }, - 
"supernodeListServicesResponse": { - "type": "object", - "properties": { - "services": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/supernodeServiceInfo" - } - }, - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "supernodeServiceInfo": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "methods": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "supernodeStatusResponse": { - "type": "object", - "properties": { - "version": { - "type": "string", - "title": "Supernode version" - }, - "uptimeSeconds": { - "type": "string", - "format": "uint64", - "title": "Uptime in seconds" - }, - "resources": { - "$ref": "#/definitions/StatusResponseResources" - }, - "registeredServices": { - "type": "array", - "items": { - "type": "string" - }, - "title": "All registered/available services" - }, - "network": { - "$ref": "#/definitions/StatusResponseNetwork", - "title": "P2P network information" - }, - "rank": { - "type": "integer", - "format": "int32", - "title": "Rank in the top supernodes list (0 if not in top list)" - }, - "ipAddress": { - "type": "string", - "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" - } - }, - "title": "The StatusResponse represents system status with clear organization" - } - } -} diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go deleted file mode 100644 index 4e9d0f23..00000000 --- a/supernode/node/supernode/server/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package server - -const ( - defaultPort = 4444 -) - -// Config contains settings of the supernode server. -type Config struct { - Identity string - ListenAddresses string - Port int -} - -// NewConfig returns a new Config instance. -func NewConfig() *Config { - return &Config{ - Port: defaultPort, - } -} diff --git a/supernode/node/supernode/server/config_test.go b/supernode/node/supernode/server/config_test.go deleted file mode 100644 index 33e06f68..00000000 --- a/supernode/node/supernode/server/config_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewConfig_Defaults(t *testing.T) { - cfg := NewConfig() - - assert.NotNil(t, cfg) - assert.Equal(t, "", cfg.ListenAddresses, "default listen address should be empty") - assert.Equal(t, 4444, cfg.Port, "default port should be 4444") - assert.Equal(t, "", cfg.Identity, "default identity should be empty") -} diff --git a/supernode/node/supernode/server/mock_keyring.go b/supernode/node/supernode/server/mock_keyring.go deleted file mode 100644 index 85cb9910..00000000 --- a/supernode/node/supernode/server/mock_keyring.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/cosmos/cosmos-sdk/crypto/keyring (interfaces: Keyring) - -// Package mock_keyring is a generated GoMock package. -package server - -import ( - reflect "reflect" - - keyring "github.com/cosmos/cosmos-sdk/crypto/keyring" - types "github.com/cosmos/cosmos-sdk/crypto/types" - types0 "github.com/cosmos/cosmos-sdk/types" - signing "github.com/cosmos/cosmos-sdk/types/tx/signing" - gomock "go.uber.org/mock/gomock" -) - -// MockKeyring is a mock of Keyring interface. -type MockKeyring struct { - ctrl *gomock.Controller - recorder *MockKeyringMockRecorder -} - -// MockKeyringMockRecorder is the mock recorder for MockKeyring. 
-type MockKeyringMockRecorder struct { - mock *MockKeyring -} - -// NewMockKeyring creates a new mock instance. -func NewMockKeyring(ctrl *gomock.Controller) *MockKeyring { - mock := &MockKeyring{ctrl: ctrl} - mock.recorder = &MockKeyringMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockKeyring) EXPECT() *MockKeyringMockRecorder { - return m.recorder -} - -// Backend mocks base method. -func (m *MockKeyring) Backend() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Backend") - ret0, _ := ret[0].(string) - return ret0 -} - -// Backend indicates an expected call of Backend. -func (mr *MockKeyringMockRecorder) Backend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Backend", reflect.TypeOf((*MockKeyring)(nil).Backend)) -} - -// Delete mocks base method. -func (m *MockKeyring) Delete(arg0 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockKeyringMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKeyring)(nil).Delete), arg0) -} - -// DeleteByAddress mocks base method. -func (m *MockKeyring) DeleteByAddress(arg0 types0.Address) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteByAddress", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteByAddress indicates an expected call of DeleteByAddress. -func (mr *MockKeyringMockRecorder) DeleteByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteByAddress", reflect.TypeOf((*MockKeyring)(nil).DeleteByAddress), arg0) -} - -// ExportPrivKeyArmor mocks base method. -func (m *MockKeyring) ExportPrivKeyArmor(arg0, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmor", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmor indicates an expected call of ExportPrivKeyArmor. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmor(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmor), arg0, arg1) -} - -// ExportPrivKeyArmorByAddress mocks base method. -func (m *MockKeyring) ExportPrivKeyArmorByAddress(arg0 types0.Address, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmorByAddress", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmorByAddress indicates an expected call of ExportPrivKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmorByAddress(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmorByAddress), arg0, arg1) -} - -// ExportPubKeyArmor mocks base method. -func (m *MockKeyring) ExportPubKeyArmor(arg0 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmor", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmor indicates an expected call of ExportPubKeyArmor. 
-func (mr *MockKeyringMockRecorder) ExportPubKeyArmor(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmor), arg0) -} - -// ExportPubKeyArmorByAddress mocks base method. -func (m *MockKeyring) ExportPubKeyArmorByAddress(arg0 types0.Address) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmorByAddress", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmorByAddress indicates an expected call of ExportPubKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPubKeyArmorByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmorByAddress), arg0) -} - -// ImportPrivKey mocks base method. -func (m *MockKeyring) ImportPrivKey(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKey", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKey indicates an expected call of ImportPrivKey. -func (mr *MockKeyringMockRecorder) ImportPrivKey(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKey", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKey), arg0, arg1, arg2) -} - -// ImportPrivKeyHex mocks base method. -func (m *MockKeyring) ImportPrivKeyHex(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKeyHex", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKeyHex indicates an expected call of ImportPrivKeyHex. -func (mr *MockKeyringMockRecorder) ImportPrivKeyHex(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKeyHex", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKeyHex), arg0, arg1, arg2) -} - -// ImportPubKey mocks base method. -func (m *MockKeyring) ImportPubKey(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPubKey", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPubKey indicates an expected call of ImportPubKey. -func (mr *MockKeyringMockRecorder) ImportPubKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPubKey", reflect.TypeOf((*MockKeyring)(nil).ImportPubKey), arg0, arg1) -} - -// Key mocks base method. -func (m *MockKeyring) Key(arg0 string) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Key", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Key indicates an expected call of Key. -func (mr *MockKeyringMockRecorder) Key(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockKeyring)(nil).Key), arg0) -} - -// KeyByAddress mocks base method. -func (m *MockKeyring) KeyByAddress(arg0 types0.Address) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "KeyByAddress", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// KeyByAddress indicates an expected call of KeyByAddress. 
-func (mr *MockKeyringMockRecorder) KeyByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeyByAddress", reflect.TypeOf((*MockKeyring)(nil).KeyByAddress), arg0) -} - -// List mocks base method. -func (m *MockKeyring) List() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// List indicates an expected call of List. -func (mr *MockKeyringMockRecorder) List() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKeyring)(nil).List)) -} - -// MigrateAll mocks base method. -func (m *MockKeyring) MigrateAll() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MigrateAll") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// MigrateAll indicates an expected call of MigrateAll. -func (mr *MockKeyringMockRecorder) MigrateAll() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateAll", reflect.TypeOf((*MockKeyring)(nil).MigrateAll)) -} - -// NewAccount mocks base method. -func (m *MockKeyring) NewAccount(arg0, arg1, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAccount", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAccount indicates an expected call of NewAccount. -func (mr *MockKeyringMockRecorder) NewAccount(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAccount", reflect.TypeOf((*MockKeyring)(nil).NewAccount), arg0, arg1, arg2, arg3, arg4) -} - -// NewMnemonic mocks base method. -func (m *MockKeyring) NewMnemonic(arg0 string, arg1 keyring.Language, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewMnemonic", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// NewMnemonic indicates an expected call of NewMnemonic. -func (mr *MockKeyringMockRecorder) NewMnemonic(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMnemonic", reflect.TypeOf((*MockKeyring)(nil).NewMnemonic), arg0, arg1, arg2, arg3, arg4) -} - -// Rename mocks base method. -func (m *MockKeyring) Rename(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Rename", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Rename indicates an expected call of Rename. -func (mr *MockKeyringMockRecorder) Rename(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rename", reflect.TypeOf((*MockKeyring)(nil).Rename), arg0, arg1) -} - -// SaveLedgerKey mocks base method. 
-func (m *MockKeyring) SaveLedgerKey(arg0 string, arg1 keyring.SignatureAlgo, arg2 string, arg3, arg4, arg5 uint32) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveLedgerKey", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveLedgerKey indicates an expected call of SaveLedgerKey. -func (mr *MockKeyringMockRecorder) SaveLedgerKey(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLedgerKey", reflect.TypeOf((*MockKeyring)(nil).SaveLedgerKey), arg0, arg1, arg2, arg3, arg4, arg5) -} - -// SaveMultisig mocks base method. -func (m *MockKeyring) SaveMultisig(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveMultisig", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveMultisig indicates an expected call of SaveMultisig. -func (mr *MockKeyringMockRecorder) SaveMultisig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveMultisig", reflect.TypeOf((*MockKeyring)(nil).SaveMultisig), arg0, arg1) -} - -// SaveOfflineKey mocks base method. -func (m *MockKeyring) SaveOfflineKey(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveOfflineKey", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveOfflineKey indicates an expected call of SaveOfflineKey. -func (mr *MockKeyringMockRecorder) SaveOfflineKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveOfflineKey", reflect.TypeOf((*MockKeyring)(nil).SaveOfflineKey), arg0, arg1) -} - -// Sign mocks base method. -func (m *MockKeyring) Sign(arg0 string, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sign", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Sign indicates an expected call of Sign. -func (mr *MockKeyringMockRecorder) Sign(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockKeyring)(nil).Sign), arg0, arg1, arg2) -} - -// SignByAddress mocks base method. -func (m *MockKeyring) SignByAddress(arg0 types0.Address, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SignByAddress", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// SignByAddress indicates an expected call of SignByAddress. -func (mr *MockKeyringMockRecorder) SignByAddress(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignByAddress", reflect.TypeOf((*MockKeyring)(nil).SignByAddress), arg0, arg1, arg2) -} - -// SupportedAlgorithms mocks base method. 
-func (m *MockKeyring) SupportedAlgorithms() (keyring.SigningAlgoList, keyring.SigningAlgoList) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SupportedAlgorithms") - ret0, _ := ret[0].(keyring.SigningAlgoList) - ret1, _ := ret[1].(keyring.SigningAlgoList) - return ret0, ret1 -} - -// SupportedAlgorithms indicates an expected call of SupportedAlgorithms. -func (mr *MockKeyringMockRecorder) SupportedAlgorithms() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportedAlgorithms", reflect.TypeOf((*MockKeyring)(nil).SupportedAlgorithms)) -} diff --git a/supernode/node/supernode/server/server_test.go b/supernode/node/supernode/server/server_test.go deleted file mode 100644 index 7803bcce..00000000 --- a/supernode/node/supernode/server/server_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package server - -import ( - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/stretchr/testify/assert" - gomock "go.uber.org/mock/gomock" - - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" -) - -// --- Mock service implementing server.service --- -type mockService struct{} - -func (m *mockService) Desc() *grpc.ServiceDesc { - return &grpc.ServiceDesc{ - ServiceName: "test.Service", - HandlerType: (*interface{})(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - } -} - -func TestNewServer_WithValidConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, err := New(cfg, "supernode-test", mockKeyring, mockLumeraClient, &mockService{}) - assert.NoError(t, err) - assert.NotNil(t, s) -} - -func TestNewServer_WithNilConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - s, err := New(nil, "supernode-test", mockKeyring, mockLumeraClient) - assert.Nil(t, s) - assert.EqualError(t, err, "config is nil") -} - -func TestSetServiceStatusAndClose(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, _ := New(cfg, "test", mockKeyring, mockLumeraClient, &mockService{}) - _ = s.setupGRPCServer() - - s.SetServiceStatus("test.Service", grpc_health_v1.HealthCheckResponse_SERVING) - s.Close() - - // No assertion — success is no panic / crash on shutdown -} diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go deleted file mode 100644 index 7e01410d..00000000 --- a/supernode/node/supernode/server/status_server.go +++ /dev/null @@ -1,204 +0,0 @@ -package server - -import ( - "context" - - "google.golang.org/grpc" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// SupernodeServer implements the SupernodeService gRPC service -type SupernodeServer struct { - pb.UnimplementedSupernodeServiceServer - statusService *supernode.SupernodeStatusService - services []ServiceInfo // Store service descriptors -} - -// ServiceInfo holds information about a registered service -type ServiceInfo struct { - Name string - Methods []string -} - -// NewSupernodeServer creates a new SupernodeServer -func NewSupernodeServer(statusService 
*supernode.SupernodeStatusService) *SupernodeServer { - return &SupernodeServer{ - statusService: statusService, - services: []ServiceInfo{}, - } -} - -// RegisterService adds a service to the known services list -func (s *SupernodeServer) RegisterService(serviceName string, desc *grpc.ServiceDesc) { - methods := make([]string, 0, len(desc.Methods)+len(desc.Streams)) - - // Add unary methods - for _, method := range desc.Methods { - methods = append(methods, method.MethodName) - } - - // Add streaming methods - for _, stream := range desc.Streams { - methods = append(methods, stream.StreamName) - } - - s.services = append(s.services, ServiceInfo{ - Name: serviceName, - Methods: methods, - }) -} - -// GetStatus implements SupernodeService.GetStatus -func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { - // Get status from the common service; gate P2P metrics by request flag - status, err := s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) - if err != nil { - return nil, err - } - - // Convert to protobuf response - response := &pb.StatusResponse{ - Version: status.Version, - UptimeSeconds: status.UptimeSeconds, - Resources: &pb.StatusResponse_Resources{ - Cpu: &pb.StatusResponse_Resources_CPU{ - UsagePercent: status.Resources.CPU.UsagePercent, - Cores: status.Resources.CPU.Cores, - }, - Memory: &pb.StatusResponse_Resources_Memory{ - TotalGb: status.Resources.Memory.TotalGB, - UsedGb: status.Resources.Memory.UsedGB, - AvailableGb: status.Resources.Memory.AvailableGB, - UsagePercent: status.Resources.Memory.UsagePercent, - }, - StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), - HardwareSummary: status.Resources.HardwareSummary, - }, - RegisteredServices: nil, - Network: &pb.StatusResponse_Network{ - PeersCount: status.Network.PeersCount, - PeerAddresses: status.Network.PeerAddresses, - }, - Rank: status.Rank, - IpAddress: status.IPAddress, - } - - // Convert storage information - for _, storage := range status.Resources.Storage { - storageInfo := &pb.StatusResponse_Resources_Storage{ - Path: storage.Path, - TotalBytes: storage.TotalBytes, - UsedBytes: storage.UsedBytes, - AvailableBytes: storage.AvailableBytes, - UsagePercent: storage.UsagePercent, - } - response.Resources.StorageVolumes = append(response.Resources.StorageVolumes, storageInfo) - } - - // Populate registered services from server registrations - if len(s.services) > 0 { - response.RegisteredServices = make([]string, 0, len(s.services)+1) - for _, svc := range s.services { - response.RegisteredServices = append(response.RegisteredServices, svc.Name) - } - // Also include health service - response.RegisteredServices = append(response.RegisteredServices, "grpc.health.v1.Health") - } - - // Map optional P2P metrics - if req.GetIncludeP2PMetrics() { - pm := status.P2PMetrics - pbdht := &pb.StatusResponse_P2PMetrics_DhtMetrics{} - for _, p := range pm.DhtMetrics.StoreSuccessRecent { - pbdht.StoreSuccessRecent = append(pbdht.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{ - TimeUnix: p.TimeUnix, - Requests: p.Requests, - Successful: p.Successful, - SuccessRate: p.SuccessRate, - }) - } - for _, p := range pm.DhtMetrics.BatchRetrieveRecent { - pbdht.BatchRetrieveRecent = append(pbdht.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{ - TimeUnix: p.TimeUnix, - Keys: p.Keys, - Required: p.Required, - FoundLocal: p.FoundLocal, - FoundNetwork: p.FoundNetwork, - 
DurationMs: p.DurationMS, - }) - } - pbdht.HotPathBannedSkips = pm.DhtMetrics.HotPathBannedSkips - pbdht.HotPathBanIncrements = pm.DhtMetrics.HotPathBanIncrements - - pbpm := &pb.StatusResponse_P2PMetrics{ - DhtMetrics: pbdht, - NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, - Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, - Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, - } - - // Network handle metrics - for k, v := range pm.NetworkHandleMetrics { - pbpm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{ - Total: v.Total, - Success: v.Success, - Failure: v.Failure, - Timeout: v.Timeout, - } - } - // Conn pool metrics - for k, v := range pm.ConnPoolMetrics { - pbpm.ConnPoolMetrics[k] = v - } - // Ban list - for _, b := range pm.BanList { - pbpm.BanList = append(pbpm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{ - Id: b.ID, - Ip: b.IP, - Port: b.Port, - Count: b.Count, - CreatedAtUnix: b.CreatedAtUnix, - AgeSeconds: b.AgeSeconds, - }) - } - // Database - pbpm.Database.P2PDbSizeMb = pm.Database.P2PDBSizeMB - pbpm.Database.P2PDbRecordsCount = pm.Database.P2PDBRecordsCount - // Disk - pbpm.Disk.AllMb = pm.Disk.AllMB - pbpm.Disk.UsedMb = pm.Disk.UsedMB - pbpm.Disk.FreeMb = pm.Disk.FreeMB - - response.P2PMetrics = pbpm - } - - // Codec configuration removed - - return response, nil -} - -// ListServices implements SupernodeService.ListServices -func (s *SupernodeServer) ListServices(ctx context.Context, req *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { - // Convert internal ServiceInfo to protobuf ServiceInfo - services := make([]*pb.ServiceInfo, 0, len(s.services)) - for _, svc := range s.services { - services = append(services, &pb.ServiceInfo{ - Name: svc.Name, - Methods: svc.Methods, - }) - } - - return &pb.ListServicesResponse{ - Services: services, - Count: int32(len(services)), - }, nil -} - -// Desc implements the service interface for gRPC service registration -func (s *SupernodeServer) Desc() *grpc.ServiceDesc { - return &pb.SupernodeService_ServiceDesc -} diff --git a/supernode/node/supernode/server/status_server_test.go b/supernode/node/supernode/server/status_server_test.go deleted file mode 100644 index e4cb9edc..00000000 --- a/supernode/node/supernode/server/status_server_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package server - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -func TestSupernodeServer_GetStatus(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with empty service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Check basic structure - assert.NotNil(t, resp.Resources) - assert.NotNil(t, resp.Resources.Cpu) - assert.NotNil(t, resp.Resources.Memory) - // Registered services populated from server registry - - // Check version field - assert.NotEmpty(t, resp.Version) - - // Check uptime field - assert.True(t, resp.UptimeSeconds >= 0) - - // Check CPU metrics - assert.True(t, resp.Resources.Cpu.UsagePercent >= 0) - assert.True(t, 
resp.Resources.Cpu.UsagePercent <= 100) - assert.True(t, resp.Resources.Cpu.Cores >= 0) - - // Check Memory metrics (now in GB) - assert.True(t, resp.Resources.Memory.TotalGb > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // Check hardware summary - if resp.Resources.Cpu.Cores > 0 && resp.Resources.Memory.TotalGb > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Check Storage (should have default root filesystem) - assert.NotEmpty(t, resp.Resources.StorageVolumes) - assert.Equal(t, "/", resp.Resources.StorageVolumes[0].Path) - - // Should have no registered services initially (no services registered) - assert.Equal(t, 0, len(resp.RegisteredServices)) - - // Check new fields have default values - assert.NotNil(t, resp.Network) - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IpAddress) -} - -// Removed: task tracking tests; status no longer returns running tasks - -func TestSupernodeServer_Desc(t *testing.T) { - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - server := NewSupernodeServer(statusService) - - desc := server.Desc() - assert.NotNil(t, desc) - assert.Equal(t, "supernode.SupernodeService", desc.ServiceName) -} diff --git a/supernode/services/cascade/adaptors/lumera.go b/supernode/services/cascade/adaptors/lumera.go deleted file mode 100644 index f5e3b52e..00000000 --- a/supernode/services/cascade/adaptors/lumera.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - "fmt" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" -) - -//go:generate mockgen -destination=mocks/lumera_mock.go -package=cascadeadaptormocks -source=lumera.go - -// LumeraClient defines the interface for interacting with Lumera chain data during cascade registration. -type LumeraClient interface { - // SupernodeModule - GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) - - // Action Module - GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) - FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) - SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) - GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) - // Auth - Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error -} - -// Client is the concrete implementation used in production. 
-type Client struct { - lc lumera.Client -} - -func NewLumeraClient(client lumera.Client) LumeraClient { - return &Client{ - lc: client, - } -} - -func (c *Client) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { - return c.lc.Action().GetAction(ctx, actionID) -} - -func (c *Client) GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) { - return c.lc.Action().GetActionFee(ctx, dataSize) -} - -func (c *Client) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { - resp, err := c.lc.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) - if err != nil { - // Preserve underlying gRPC status/details - return nil, fmt.Errorf("finalize cascade action broadcast failed: %w", err) - } - - // Surface chain-level failures (non-zero code) with rich context - if resp != nil && resp.TxResponse != nil && resp.TxResponse.Code != 0 { - return nil, fmt.Errorf( - "tx failed: code=%d codespace=%s height=%d gas_wanted=%d gas_used=%d raw_log=%s", - resp.TxResponse.Code, - resp.TxResponse.Codespace, - resp.TxResponse.Height, - resp.TxResponse.GasWanted, - resp.TxResponse.GasUsed, - resp.TxResponse.RawLog, - ) - } - - return resp, nil -} - -func (c *Client) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { - return c.lc.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) -} - -func (c *Client) GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { - return c.lc.SuperNode().GetTopSuperNodesForBlock(ctx, height) -} - -func (c *Client) Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error { - return c.lc.Auth().Verify(ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/lumera_mock.go b/supernode/services/cascade/adaptors/mocks/lumera_mock.go deleted file mode 100644 index 29cdd48f..00000000 --- a/supernode/services/cascade/adaptors/mocks/lumera_mock.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: lumera.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - types "github.com/LumeraProtocol/lumera/x/action/v1/types" - types0 "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - tx "github.com/cosmos/cosmos-sdk/types/tx" - gomock "github.com/golang/mock/gomock" -) - -// MockLumeraClient is a mock of LumeraClient interface. -type MockLumeraClient struct { - ctrl *gomock.Controller - recorder *MockLumeraClientMockRecorder -} - -// MockLumeraClientMockRecorder is the mock recorder for MockLumeraClient. -type MockLumeraClientMockRecorder struct { - mock *MockLumeraClient -} - -// NewMockLumeraClient creates a new mock instance. -func NewMockLumeraClient(ctrl *gomock.Controller) *MockLumeraClient { - mock := &MockLumeraClient{ctrl: ctrl} - mock.recorder = &MockLumeraClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLumeraClient) EXPECT() *MockLumeraClientMockRecorder { - return m.recorder -} - -// FinalizeAction mocks base method. 
-func (m *MockLumeraClient) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.BroadcastTxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.BroadcastTxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FinalizeAction indicates an expected call of FinalizeAction. -func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).FinalizeAction), ctx, actionID, rqids) -} - -// GetAction mocks base method. -func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAction", ctx, actionID) - ret0, _ := ret[0].(*types.QueryGetActionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAction indicates an expected call of GetAction. -func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockLumeraClient)(nil).GetAction), ctx, actionID) -} - -// GetActionFee mocks base method. -func (m *MockLumeraClient) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetActionFee", ctx, dataSize) - ret0, _ := ret[0].(*types.QueryGetActionFeeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetActionFee indicates an expected call of GetActionFee. -func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockLumeraClient)(nil).GetActionFee), ctx, dataSize) -} - -// GetTopSupernodes mocks base method. -func (m *MockLumeraClient) GetTopSupernodes(ctx context.Context, height uint64) (*types0.QueryGetTopSuperNodesForBlockResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTopSupernodes", ctx, height) - ret0, _ := ret[0].(*types0.QueryGetTopSuperNodesForBlockResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTopSupernodes indicates an expected call of GetTopSupernodes. -func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSupernodes", reflect.TypeOf((*MockLumeraClient)(nil).GetTopSupernodes), ctx, height) -} - -// SimulateFinalizeAction mocks base method. -func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.SimulateResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction. -func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids) -} - -// Verify mocks base method. 
-func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sigBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Verify", ctx, creator, file, sigBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// Verify indicates an expected call of Verify. -func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockLumeraClient)(nil).Verify), ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go deleted file mode 100644 index ec99d92a..00000000 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: p2p.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - logtrace "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockP2PService is a mock of P2PService interface. -type MockP2PService struct { - ctrl *gomock.Controller - recorder *MockP2PServiceMockRecorder -} - -// MockP2PServiceMockRecorder is the mock recorder for MockP2PService. -type MockP2PServiceMockRecorder struct { - mock *MockP2PService -} - -// NewMockP2PService creates a new mock instance. -func NewMockP2PService(ctrl *gomock.Controller) *MockP2PService { - mock := &MockP2PService{ctrl: ctrl} - mock.recorder = &MockP2PServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockP2PService) EXPECT() *MockP2PServiceMockRecorder { - return m.recorder -} - -// StoreArtefacts mocks base method. -func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) - ret0, _ := ret[0].(error) - return ret0 -} - -// StoreArtefacts indicates an expected call of StoreArtefacts. -func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreArtefacts", reflect.TypeOf((*MockP2PService)(nil).StoreArtefacts), ctx, req, f) -} diff --git a/supernode/services/cascade/adaptors/mocks/rq_mock.go b/supernode/services/cascade/adaptors/mocks/rq_mock.go deleted file mode 100644 index f45f2eb5..00000000 --- a/supernode/services/cascade/adaptors/mocks/rq_mock.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: rq.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - codec "github.com/LumeraProtocol/supernode/v2/pkg/codec" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockCodecService is a mock of CodecService interface. -type MockCodecService struct { - ctrl *gomock.Controller - recorder *MockCodecServiceMockRecorder -} - -// MockCodecServiceMockRecorder is the mock recorder for MockCodecService. 
-type MockCodecServiceMockRecorder struct { - mock *MockCodecService -} - -// NewMockCodecService creates a new mock instance. -func NewMockCodecService(ctrl *gomock.Controller) *MockCodecService { - mock := &MockCodecService{ctrl: ctrl} - mock.recorder = &MockCodecServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodecService) EXPECT() *MockCodecServiceMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodecService) Decode(ctx context.Context, req adaptors.DecodeRequest) (adaptors.DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(adaptors.DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. -func (mr *MockCodecServiceMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodecService)(nil).Decode), ctx, req) -} - -// EncodeInput mocks base method. -func (m *MockCodecService) EncodeInput(ctx context.Context, taskID, path string, dataSize int) (adaptors.EncodeResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EncodeInput", ctx, taskID, path, dataSize) - ret0, _ := ret[0].(adaptors.EncodeResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// EncodeInput indicates an expected call of EncodeInput. -func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeInput", reflect.TypeOf((*MockCodecService)(nil).EncodeInput), ctx, taskID, path, dataSize) -} - -// PrepareDecode mocks base method. -func (m *MockCodecService) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) ([]string, func(int, string, []byte) (string, error), func() error, *codec.Workspace, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrepareDecode", ctx, actionID, layout) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(func(int, string, []byte) (string, error)) - ret2, _ := ret[2].(func() error) - ret3, _ := ret[3].(*codec.Workspace) - ret4, _ := ret[4].(error) - return ret0, ret1, ret2, ret3, ret4 -} - -// PrepareDecode indicates an expected call of PrepareDecode. 
-func (mr *MockCodecServiceMockRecorder) PrepareDecode(ctx, actionID, layout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDecode", reflect.TypeOf((*MockCodecService)(nil).PrepareDecode), ctx, actionID, layout) -} diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go deleted file mode 100644 index 93fed82a..00000000 --- a/supernode/services/cascade/adaptors/p2p.go +++ /dev/null @@ -1,282 +0,0 @@ -package adaptors - -import ( - "context" - "fmt" - "io/fs" - "math" - "math/rand/v2" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/pkg/errors" -) - -const ( - loadSymbolsBatchSize = 3000 - // Minimum first-pass coverage to store before returning from Register (percent) - storeSymbolsPercent = 18 - - storeBatchContextTimeout = 3 * time.Minute -) - -// Local P2P data type identifier used when storing via P2P. -// Value must remain stable to preserve DB semantics. -const P2PDataRaptorQSymbol = 1 - -// P2PService defines the interface for storing data in the P2P layer. -// -//go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go -type P2PService interface { - // StoreArtefacts stores ID files and RaptorQ symbols. - // Metrics are recorded via internal metrics helpers; no metrics are returned. - StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error -} - -// p2pImpl is the default implementation of the P2PService interface. -type p2pImpl struct { - p2p p2p.Client - rqStore rqstore.Store -} - -// NewP2PService returns a concrete implementation of P2PService. 
-func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { - return &p2pImpl{p2p: client, rqStore: store} -} - -type StoreArtefactsRequest struct { - TaskID string - ActionID string - IDFiles [][]byte - SymbolsDir string -} - -func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) - - start := time.Now() - firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) - if err != nil { - return errors.Wrap(err, "error storing artefacts") - } - dur := time.Since(start).Milliseconds() - // After first-pass, log how many symbols remain on disk - remaining := 0 - if req.SymbolsDir != "" { - if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { - remaining = len(keys) - } - } - logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) - if remaining == 0 { - logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) - } - // Metrics collection removed; logs retained - return nil -} - -// storeCascadeSymbols loads symbols from `symbolsDir`, optionally downsamples, -// streams them in fixed-size batches to the P2P layer, and tracks: -// - an item-weighted aggregate success rate across all batches -// - the total number of symbols processed (item count) -// - the total number of node requests attempted across batches -// -// Returns (aggRate, totalSymbols, totalRequests, err). 
-func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { - /* record directory in DB */ - if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { - return 0, 0, fmt.Errorf("store symbol dir: %w", err) - } - - /* gather every symbol path under symbolsDir ------------------------- */ - keys, err := walkSymbolTree(symbolsDir) - if err != nil { - return 0, 0, err - } - - totalAvailable := len(keys) - targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) - if targetCount < 1 && totalAvailable > 0 { - targetCount = 1 - } - logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "store: target coverage", logtrace.Fields{ - "total_symbols": totalAvailable, - "target_percent": storeSymbolsPercent, - "target_count": targetCount, - }) - - /* down-sample if we exceed the “big directory” threshold ------------- */ - if len(keys) > loadSymbolsBatchSize { - want := targetCount - if want < len(keys) { - rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) - keys = keys[:want] - } - sort.Strings(keys) // deterministic order inside the sample - } - logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - - totalSymbols := 0 // symbols stored - firstBatchProcessed := false - - for start := 0; start < len(keys); { - end := min(start+loadSymbolsBatchSize, len(keys)) - batch := keys[start:end] - - if !firstBatchProcessed && len(metadataFiles) > 0 { - // First "batch" has to include metadata + as many symbols as fit under batch size. - // If metadataFiles >= batch size, we send metadata in this batch and symbols start next batch. - roomForSymbols := loadSymbolsBatchSize - len(metadataFiles) - if roomForSymbols < 0 { - roomForSymbols = 0 - } - if roomForSymbols < len(batch) { - // trim the first symbol chunk to leave space for metadata - batch = batch[:roomForSymbols] - end = start + roomForSymbols - } - - // Load just this symbol chunk - symBytes, err := utils.LoadSymbols(symbolsDir, batch) - if err != nil { - return 0, 0, fmt.Errorf("load symbols: %w", err) - } - - // Build combined payload: metadata first, then symbols - payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)) - payload = append(payload, metadataFiles...) - payload = append(payload, symBytes...) 
- - // Send as the same data type you use for symbols - logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) - bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID) - cancel() - if err != nil { - return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) - } - logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) - - totalSymbols += len(symBytes) - // No per-RPC metrics propagated from p2p - - // Delete only the symbols we uploaded - if len(batch) > 0 { - if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { - return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) - } - } - // Log remaining symbols in directory after deletion - if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "store: remaining after first batch", logtrace.Fields{"taskID": taskID, "left": left}) - } else { - logtrace.Info(ctx, "store: dir empty after first batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } - } - - firstBatchProcessed = true - } else { - count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) - if err != nil { - return totalSymbols, totalAvailable, err - } - totalSymbols += count - } - - start = end - } - - // Coverage uses symbols only - achievedPct := 0.0 - if totalAvailable > 0 { - achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 - } - logtrace.Info(ctx, "store: coverage", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) - - if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { - return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) - } - // Final remaining count after first pass flagged - if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "store: remaining after first-pass", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) - } else { - logtrace.Info(ctx, "store: directory empty after first-pass", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } - } - - return totalSymbols, totalAvailable, nil - -} - -// walkSymbolTree returns relative file keys for symbols under `root`, -// skipping JSON layout files. -func walkSymbolTree(root string) ([]string, error) { - var keys []string - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err // propagate I/O errors - } - if d.IsDir() { - return nil // skip directories - } - // ignore layout json if present - if strings.EqualFold(filepath.Ext(d.Name()), ".json") { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - keys = append(keys, rel) // store as "block_0/filename" - return nil - }) - if err != nil { - return nil, fmt.Errorf("walk symbol tree: %w", err) - } - return keys, nil -} - -// storeSymbolsInP2P loads a batch of symbols and stores them via P2P. -// Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. 
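// A minimal, self-contained sketch of the first-pass arithmetic used by
// storeCascadeSymbolsAndData above: the target symbol count is the ceiling of
// storeSymbolsPercent (18%) of the available symbols, clamped to at least 1,
// and the first batch reserves room for the metadata files inside the
// loadSymbolsBatchSize budget. Names suffixed with "Sketch" are illustrative
// assumptions, not identifiers from the package.
package main

import (
	"fmt"
	"math"
)

const (
	storeSymbolsPercentSketch  = 18   // mirrors storeSymbolsPercent above
	loadSymbolsBatchSizeSketch = 3000 // mirrors loadSymbolsBatchSize above
)

// firstPassTarget returns how many symbols the first pass aims to store.
func firstPassTarget(totalAvailable int) int {
	target := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercentSketch / 100.0))
	if target < 1 && totalAvailable > 0 {
		target = 1
	}
	return target
}

// firstBatchSymbolRoom returns how many symbols fit in the first batch after
// the metadata files are packed into the same batch-size budget.
func firstBatchSymbolRoom(metadataFiles int) int {
	room := loadSymbolsBatchSizeSketch - metadataFiles
	if room < 0 {
		room = 0
	}
	return room
}

func main() {
	fmt.Println(firstPassTarget(5000))    // 900 symbols targeted out of 5000
	fmt.Println(firstPassTarget(3))       // at least 1 even for tiny inputs
	fmt.Println(firstBatchSymbolRoom(50)) // 2950 symbols fit beside 50 ID files
}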
-func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) - - symbols, err := utils.LoadSymbols(root, fileKeys) - if err != nil { - return 0, fmt.Errorf("load symbols: %w", err) - } - - symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - defer cancel() - - logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) - if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { - return len(symbols), fmt.Errorf("p2p store batch: %w", err) - } - logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) - - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return len(symbols), fmt.Errorf("delete symbols: %w", err) - } - // After deletion, log remaining count in directory - left := -1 - if rem, werr := walkSymbolTree(root); werr == nil { - left = len(rem) - } - logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left}) - - // No per-RPC metrics propagated from p2p - return len(symbols), nil -} diff --git a/supernode/services/cascade/adaptors/rq.go b/supernode/services/cascade/adaptors/rq.go deleted file mode 100644 index 92e89819..00000000 --- a/supernode/services/cascade/adaptors/rq.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" -) - -// CodecService defines the interface for RaptorQ encoding of input data. -// -//go:generate mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go -type CodecService interface { - EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) - PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) - Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) -} - -// EncodeResult represents the outcome of encoding the input data. -type EncodeResult struct { - SymbolsDir string - Metadata codec.Layout -} - -// codecImpl is the default implementation using the real codec service. -type codecImpl struct { - codec codec.Codec -} - -// NewCodecService creates a new production instance of CodecService. -func NewCodecService(codec codec.Codec) CodecService { - return &codecImpl{codec: codec} -} - -// EncodeInput encodes the provided data and returns symbols and metadata. 
-func (c *codecImpl) EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) { - resp, err := c.codec.Encode(ctx, codec.EncodeRequest{ - TaskID: taskID, - Path: path, - DataSize: dataSize, - }) - if err != nil { - return EncodeResult{}, err - } - - return EncodeResult{ - SymbolsDir: resp.SymbolsDir, - Metadata: resp.Metadata, - }, nil -} - -type DecodeRequest struct { - Symbols map[string][]byte - Layout codec.Layout - ActionID string -} - -type DecodeResponse struct { - DecodeTmpDir string - FilePath string -} - -// Decode decodes the provided symbols and returns the original file -func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - resp, err := c.codec.Decode(ctx, codec.DecodeRequest{ - Symbols: req.Symbols, - Layout: req.Layout, - ActionID: req.ActionID, - }) - if err != nil { - return DecodeResponse{}, err - } - - return DecodeResponse{ - FilePath: resp.FilePath, - DecodeTmpDir: resp.DecodeTmpDir, - }, nil -} - -func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) { - return -} diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go deleted file mode 100644 index 0c5c9ed7..00000000 --- a/supernode/services/cascade/download.go +++ /dev/null @@ -1,375 +0,0 @@ -package cascade - -import ( - "context" - "encoding/json" - "fmt" - "os" - "sort" - "time" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/crypto" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -const targetRequiredPercent = 17 - -type DownloadRequest struct { - ActionID string - // Signature is required for private downloads. For public cascade - // actions (metadata.Public == true), this is ignored. - Signature string -} - -type DownloadResponse struct { - EventType SupernodeEventType - Message string - FilePath string - DownloadedDir string -} - -// Download retrieves a cascade artefact by action ID. -// -// Authorization behavior: -// - If the cascade metadata has Public = true, signature verification is skipped -// and the file is downloadable by anyone. -// - If Public = false, a valid download signature is required. 
-func (task *CascadeRegistrationTask) Download( - ctx context.Context, - req *DownloadRequest, - send func(resp *DownloadResponse) error, -) (err error) { - // Seed correlation ID and origin from actionID for downstream logs - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "download") - } - fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "download: request", fields) - - // Ensure task status is finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) - if err != nil { - // Ensure error is logged as string for consistency - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to get action", err, fields) - } - logtrace.Info(ctx, "download: action fetched", fields) - task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) - - if actionDetails.GetAction().State != actiontypes.ActionStateDone { - // Return a clearer error message when action is not yet finalized - err = errors.New("action is not in a valid state") - fields[logtrace.FieldError] = "action state is not done yet" - fields[logtrace.FieldActionState] = actionDetails.GetAction().State - return task.wrapErr(ctx, "action not finalized yet", err, fields) - } - logtrace.Info(ctx, "download: action state ok", fields) - -metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) - } - logtrace.Info(ctx, "download: metadata decoded", fields) - task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) - - // Enforce download authorization based on metadata.Public - // - If public: skip signature verification; allow anonymous downloads - // - If private: require a valid signature - if !metadata.Public { - if req.Signature == "" { - fields[logtrace.FieldError] = "missing signature for private download" - // Provide a descriptive message without a fabricated root error - return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) - } - if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to verify download signature", err, fields) - } - logtrace.Info(ctx, "download: signature verified", fields) - } else { - logtrace.Info(ctx, "download: public cascade (no signature)", fields) - } - - // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - - logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) - filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) - if err != nil { - fields[logtrace.FieldError] = err.Error() - // Ensure temporary decode directory is cleaned if decode failed after being created - if tmpDir != "" { - if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { - logtrace.Warn(ctx, "cleanup of tmp dir after error failed", 
logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) - } - } - return task.wrapErr(ctx, "failed to download artifacts", err, fields) - } - logtrace.Debug(ctx, "File reconstructed and hash verified", fields) - // Notify: decode completed, file ready on disk - task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) - - return nil -} - -func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Debug(ctx, "started downloading the artifacts", fields) - - var ( - layout codec.Layout - layoutFetchMS int64 - layoutDecodeMS int64 - layoutAttempts int - ) - - for _, indexID := range metadata.RqIdsIds { - iStart := time.Now() - logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) - indexFile, err := task.P2PClient.Retrieve(ctx, indexID) - if err != nil || len(indexFile) == 0 { - logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) - continue - } - logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) - - // Parse index file to get layout IDs - indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) - if err != nil { - logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) - continue - } - - // Try to retrieve layout files using layout IDs from index file - var netMS, decMS int64 - layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) - if err != nil { - logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": layoutAttempts}) - continue - } - layoutFetchMS = netMS - layoutDecodeMS = decMS - - if len(layout.Blocks) > 0 { - logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) - break - } - } - - if len(layout.Blocks) == 0 { - return "", "", errors.New("no symbols found in RQ metadata") - } - // Persist layout timing in fields for downstream metrics - fields["layout_fetch_ms"] = layoutFetchMS - fields["layout_decode_ms"] = layoutDecodeMS - fields["layout_attempts"] = layoutAttempts - return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) -} - -// restoreFileFromLayout reconstructs the original file from the provided layout -// and a subset of retrieved symbols. The method deduplicates symbol identifiers -// before network retrieval to avoid redundant requests and ensure the requested -// count reflects unique symbols only. 
-func (task *CascadeRegistrationTask) restoreFileFromLayout( - ctx context.Context, - layout codec.Layout, - dataHash string, - actionID string, - send func(resp *DownloadResponse) error, -) (string, string, error) { - - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - } - // Deduplicate symbols across blocks to avoid redundant requests - symSet := make(map[string]struct{}) - for _, block := range layout.Blocks { - for _, s := range block.Symbols { - symSet[s] = struct{}{} - } - } - allSymbols := make([]string, 0, len(symSet)) - for s := range symSet { - allSymbols = append(allSymbols, s) - } - sort.Strings(allSymbols) - - totalSymbols := len(allSymbols) - fields["totalSymbols"] = totalSymbols - // Compute target requirement (reporting only; does not change behavior) - targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 - if targetRequiredCount < 1 && totalSymbols > 0 { - targetRequiredCount = 1 - } - logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) - - // Measure symbols batch retrieve duration - retrieveStart := time.Now() - // Use context as-is; metrics task tagging removed - // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy - // The DHT will short-circuit once it finds the required number across the provided keys - reqCount := targetRequiredCount - if reqCount > totalSymbols { - reqCount = totalSymbols - } - rStart := time.Now() - logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) - symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "batch retrieve failed", fields) - return "", "", fmt.Errorf("batch retrieve symbols: %w", err) - } - retrieveMS := time.Since(retrieveStart).Milliseconds() - logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) - - // Measure decode duration - decodeStart := time.Now() - dStart := time.Now() - logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) - decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ - ActionID: actionID, - Symbols: symbols, - Layout: layout, - }) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "decode failed", fields) - return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) - } - decodeMS := time.Since(decodeStart).Milliseconds() - logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) - - // Emit minimal JSON payload (metrics system removed) - minPayload := map[string]any{ - "retrieve": map[string]any{ - "retrieve_ms": retrieveMS, - "decode_ms": decodeMS, - "target_required_percent": targetRequiredPercent, - "target_required_count": targetRequiredCount, - "total_symbols": totalSymbols, - }, - } - if b, err := json.MarshalIndent(minPayload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } - - fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) - if err != nil { - fields[logtrace.FieldError] = err.Error() - 
logtrace.Error(ctx, "failed to hash file", fields) - return "", "", fmt.Errorf("hash file: %w", err) - } - if fileHash == nil { - fields[logtrace.FieldError] = "file hash is nil" - logtrace.Error(ctx, "failed to hash file", fields) - return "", "", errors.New("file hash is nil") - } - - err = cascadekit.VerifyB64DataHash(fileHash, dataHash) - if err != nil { - logtrace.Error(ctx, "failed to verify hash", fields) - fields[logtrace.FieldError] = err.Error() - return "", decodeInfo.DecodeTmpDir, err - } - // Preserve original debug log for successful hash match - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) - // Log the state of the temporary decode directory - if decodeInfo.DecodeTmpDir != "" { - if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { - if left := len(set); left > 0 { - logtrace.Debug(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left}) - } else { - logtrace.Debug(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir}) - } - } - } - logtrace.Info(ctx, "download: file verified", fields) - - return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil -} - -func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg string, filePath string, tmpDir string, send func(resp *DownloadResponse) error) { - _ = send(&DownloadResponse{ - EventType: eventType, - Message: msg, - FilePath: filePath, - DownloadedDir: tmpDir, - }) -} - -// parseIndexFile parses compressed index file to extract IndexFile structure -// parseIndexFile moved to cascadekit.ParseCompressedIndexFile - -// retrieveLayoutFromIndex retrieves layout file using layout IDs from index file -func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { - // Try to retrieve layout files using layout IDs from index file - var ( - totalFetchMS int64 - totalDecodeMS int64 - attempts int - ) - for _, layoutID := range indexData.LayoutIDs { - attempts++ - t0 := time.Now() - logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts}) - layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) - took := time.Since(t0).Milliseconds() - totalFetchMS += took - if err != nil || len(layoutFile) == 0 { - logtrace.Warn(ctx, "Retrieve layout file failed or empty", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "ms": took, logtrace.FieldError: fmt.Sprintf("%v", err)}) - continue - } - - t1 := time.Now() - layout, _, _, err := cascadekit.ParseRQMetadataFile(layoutFile) - decMS := time.Since(t1).Milliseconds() - totalDecodeMS += decMS - if err != nil { - logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "decode_ms": decMS, logtrace.FieldError: err.Error()}) - continue - } - - if len(layout.Blocks) > 0 { - logtrace.Debug(ctx, "Layout file retrieved and parsed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS}) - return layout, totalFetchMS, totalDecodeMS, attempts, nil - } - } - - return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index") -} - -// CleanupDownload removes the temporary directory created during decode. -// The parameter is a directory path (not an action ID). 
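// A small sketch of the retrieval planning done in restoreFileFromLayout
// above: symbol IDs are deduplicated across layout blocks into one sorted
// slice, and the request count is the integer ceiling of
// targetRequiredPercent (17%) of the unique total, clamped to at least 1.
// Function and constant names here are illustrative assumptions, not part of
// the package.
package main

import (
	"fmt"
	"sort"
)

const targetRequiredPercentSketch = 17 // mirrors targetRequiredPercent above

// planSymbolRetrieval flattens per-block symbol lists into unique, sorted keys
// and computes how many of them the batch retrieve should ask for.
func planSymbolRetrieval(blocks [][]string) (keys []string, required int) {
	seen := make(map[string]struct{})
	for _, block := range blocks {
		for _, s := range block {
			seen[s] = struct{}{}
		}
	}
	for s := range seen {
		keys = append(keys, s)
	}
	sort.Strings(keys) // deterministic request order, as in the original

	total := len(keys)
	required = (total*targetRequiredPercentSketch + 99) / 100 // ceiling without floats
	if required < 1 && total > 0 {
		required = 1
	}
	return keys, required
}

func main() {
	keys, need := planSymbolRetrieval([][]string{{"a", "b", "c"}, {"b", "c", "d"}})
	fmt.Println(len(keys), need) // 4 unique symbols, 1 requested (ceil of 0.68)
}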
-func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, dirPath string) error { - if dirPath == "" { - return errors.New("directory path is empty") - } - - // For now, we use tmp directory path as provided by decoder - logtrace.Debug(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath}) - if err := os.RemoveAll(dirPath); err != nil { - logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": dirPath, logtrace.FieldError: err.Error()}) - return errors.Errorf("failed to delete download directory: %s, :%s", dirPath, err.Error()) - } - logtrace.Debug(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath}) - - return nil -} diff --git a/supernode/services/cascade/events_test.go b/supernode/services/cascade/events_test.go deleted file mode 100644 index ddf98871..00000000 --- a/supernode/services/cascade/events_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package cascade - -import ( - "testing" -) - -func TestSupernodeEventTypeValues(t *testing.T) { - tests := []struct { - name string - value SupernodeEventType - expected int - }{ - {"UNKNOWN", SupernodeEventTypeUNKNOWN, 0}, - {"ActionRetrieved", SupernodeEventTypeActionRetrieved, 1}, - {"ActionFeeVerified", SupernodeEventTypeActionFeeVerified, 2}, - {"TopSupernodeCheckPassed", SupernodeEventTypeTopSupernodeCheckPassed, 3}, - {"MetadataDecoded", SupernodeEventTypeMetadataDecoded, 4}, - {"DataHashVerified", SupernodeEventTypeDataHashVerified, 5}, - {"InputEncoded", SupernodeEventTypeInputEncoded, 6}, - {"SignatureVerified", SupernodeEventTypeSignatureVerified, 7}, - {"RQIDsGenerated", SupernodeEventTypeRQIDsGenerated, 8}, - {"RqIDsVerified", SupernodeEventTypeRqIDsVerified, 9}, - {"FinalizeSimulated", SupernodeEventTypeFinalizeSimulated, 10}, - {"ArtefactsStored", SupernodeEventTypeArtefactsStored, 11}, - {"ActionFinalized", SupernodeEventTypeActionFinalized, 12}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if int(tt.value) != tt.expected { - t.Errorf("Expected %s to be %d, got %d", tt.name, tt.expected, tt.value) - } - }) - } -} diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go deleted file mode 100644 index 5a36b644..00000000 --- a/supernode/services/cascade/helper.go +++ /dev/null @@ -1,292 +0,0 @@ -package cascade - -import ( - "context" - "encoding/base64" - "fmt" - "strconv" - - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - - sdk "github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// layout stats helpers removed to keep download metrics minimal. 
- -func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { - res, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return nil, task.wrapErr(ctx, "failed to get action", err, f) - } - - if res.GetAction().ActionID == "" { - return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) - } - logtrace.Debug(ctx, "action has been retrieved", f) - - return res.GetAction(), nil -} - -func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { - top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) - if err != nil { - return task.wrapErr(ctx, "failed to get top SNs", err, f) - } - logtrace.Debug(ctx, "Fetched Top Supernodes", f) - - if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { - // Build information about supernodes for better error context - addresses := make([]string, len(top.Supernodes)) - for i, sn := range top.Supernodes { - addresses[i] = sn.SupernodeAccount - } - logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{ - "currentAddress": task.config.SupernodeAccountAddress, - "topSupernodes": addresses, - }) - return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", - errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f) - } - - return nil -} - -// decodeCascadeMetadata moved to cascadekit.UnmarshalCascadeMetadata -// verifyDataHash moved to cascadekit.VerifyB64DataHash - -func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { - resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) - if err != nil { - return nil, task.wrapErr(ctx, "failed to encode data", err, f) - } - return &resp, nil -} - -func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, - encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { - - // Extract index file and creator signature from encoded data - // The signatures field contains: Base64(index_file).creators_signature - indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) - } - - // Verify creator signature on index file - creatorSigBytes, err := base64.StdEncoding.DecodeString(creatorSig) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) - } - - if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) - } - logtrace.Debug(ctx, "creator signature successfully verified", f) - - // Decode index file to get the layout signature - indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) - } - - // Verify layout signature on the actual layout - layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) - } - - layoutJSON, err := json.Marshal(encodedMeta) - if err 
!= nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) - } - layoutB64 := utils.B64Encode(layoutJSON) - if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) - } - logtrace.Debug(ctx, "layout signature successfully verified", f) - - return encodedMeta, indexFile.LayoutSignature, nil -} - -func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, - sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { - // The signatures field contains: Base64(index_file).creators_signature - // This full format will be used for ID generation to match chain expectations - - // Generate layout files (redundant metadata files) - layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate layout files", err, f) - } - - // Generate index files using full signatures format for ID generation (matches chain expectation) - indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate index files", err, f) - } - - // Store layout files and index files separately in P2P - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) - - // Return index IDs (sent to chain) and all files (stored in P2P) - return cascadekit.GenRQIdentifiersFilesResponse{ - RQIDs: indexIDs, - RedundantMetadataFiles: allFiles, - }, nil -} - -// storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the -// P2P adaptor. P2P does not return metrics; cascade summarizes and emits them. -func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { - if f == nil { - f = logtrace.Fields{} - } - lf := logtrace.Fields{ - logtrace.FieldActionID: actionID, - logtrace.FieldTaskID: task.ID(), - "id_files_count": len(idFiles), - "symbols_dir": symbolsDir, - } - for k, v := range f { - lf[k] = v - } - // Tag the flow as first-pass just before handing over to P2P - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - logtrace.Info(ctx, "store: first-pass begin", lf) - - if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f); err != nil { - // Log and wrap to ensure a proper error line and context - return task.wrapErr(ctx, "failed to store artefacts", err, lf) - } - return nil -} - -func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { - if err != nil { - f[logtrace.FieldError] = err.Error() - } - logtrace.Error(ctx, msg, f) - - // Preserve the root cause in the gRPC error description so callers receive full context. - if err != nil { - return status.Errorf(codes.Internal, "%s: %v", msg, err) - } - return status.Errorf(codes.Internal, "%s", msg) -} - -// emitArtefactsStored builds a single-line metrics summary and emits the -// SupernodeEventTypeArtefactsStored event while logging the metrics line. 
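// A hedged sketch of the signatures format described above
// ("Base64(index_file).creators_signature"): the base64 index file is
// everything before the final '.', the creator's signature everything after
// it. This is assumed to be what cascadekit.ExtractIndexAndCreatorSig does;
// the helper below is illustrative only, not the package's implementation.
package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitSignatures splits "Base64(index_file).creators_signature" on its last dot.
func splitSignatures(s string) (indexB64, creatorSig string, err error) {
	i := strings.LastIndex(s, ".")
	if i <= 0 || i == len(s)-1 {
		return "", "", errors.New("signatures field is not in Base64(index_file).signature form")
	}
	return s[:i], s[i+1:], nil
}

func main() {
	// Sample, fabricated values: standard base64 cannot contain '.', so the last dot is a safe separator.
	idx, sig, err := splitSignatures("ZXhhbXBsZUluZGV4RmlsZQ==.bXlTaWduYXR1cmU=")
	fmt.Println(err == nil, idx != "", sig != "")
}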
-func (task *CascadeRegistrationTask) emitArtefactsStored( - ctx context.Context, - fields logtrace.Fields, - _ codec.Layout, - send func(resp *RegisterResponse) error, -) { - if fields == nil { - fields = logtrace.Fields{} - } - - // Emit a minimal event message (metrics system removed) - msg := "Artefacts stored" - logtrace.Debug(ctx, "artefacts have been stored", fields) - task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) -} - -// Removed legacy helpers; functionality is centralized in cascadekit. - -// - -// verifyActionFee checks if the action fee is sufficient for the given data size -// It fetches action parameters, calculates the required fee, and compares it with the action price -func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { - dataSizeInKBs := dataSize / 1024 - fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) - if err != nil { - return task.wrapErr(ctx, "failed to get action fee", err, fields) - } - - // Parse fee amount from string to int64 - amount, err := strconv.ParseInt(fee.Amount, 10, 64) - if err != nil { - return task.wrapErr(ctx, "failed to parse fee amount", err, fields) - } - - // Calculate per-byte fee based on data size - requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) - - // Log the calculated fee - logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{ - "fee": requiredFee.String(), - "dataBytes": dataSize, - }) - // Check if action price is less than required fee - if action.Price.IsLT(requiredFee) { - return task.wrapErr( - ctx, - "insufficient fee", - fmt.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), - fields, - ) - } - - return nil -} - -// - -// - -// - -// VerifyDownloadSignature verifies the download signature for actionID.creatorAddress -func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - logtrace.FieldMethod: "VerifyDownloadSignature", - } - - // Get action details to extract creator address - actionDetails, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return task.wrapErr(ctx, "failed to get action", err, fields) - } - - creatorAddress := actionDetails.GetAction().Creator - fields["creator_address"] = creatorAddress - - // Create the expected signature data: actionID (creator address not included in payload) - signatureData := fmt.Sprintf("%s", actionID) - fields["signature_data"] = signatureData - - // Decode the base64 signature - signatureBytes, err := base64.StdEncoding.DecodeString(signature) - if err != nil { - return task.wrapErr(ctx, "failed to decode signature from base64", err, fields) - } - - // Verify the signature using Lumera client - if err := task.LumeraClient.Verify(ctx, creatorAddress, []byte(signatureData), signatureBytes); err != nil { - return task.wrapErr(ctx, "failed to verify download signature", err, fields) - } - - logtrace.Debug(ctx, "download signature successfully verified", fields) - return nil -} diff --git a/supernode/services/cascade/mocks/cascade_interfaces_mock.go b/supernode/services/cascade/mocks/cascade_interfaces_mock.go deleted file mode 100644 index 44d3189c..00000000 --- a/supernode/services/cascade/mocks/cascade_interfaces_mock.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: interfaces.go - -// Package cascademocks is a generated GoMock package. -package cascademocks - -import ( - context "context" - reflect "reflect" - - cascade "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - gomock "github.com/golang/mock/gomock" -) - -// MockCascadeServiceFactory is a mock of CascadeServiceFactory interface. -type MockCascadeServiceFactory struct { - ctrl *gomock.Controller - recorder *MockCascadeServiceFactoryMockRecorder -} - -// MockCascadeServiceFactoryMockRecorder is the mock recorder for MockCascadeServiceFactory. -type MockCascadeServiceFactoryMockRecorder struct { - mock *MockCascadeServiceFactory -} - -// NewMockCascadeServiceFactory creates a new mock instance. -func NewMockCascadeServiceFactory(ctrl *gomock.Controller) *MockCascadeServiceFactory { - mock := &MockCascadeServiceFactory{ctrl: ctrl} - mock.recorder = &MockCascadeServiceFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeServiceFactory) EXPECT() *MockCascadeServiceFactoryMockRecorder { - return m.recorder -} - -// NewCascadeRegistrationTask mocks base method. -func (m *MockCascadeServiceFactory) NewCascadeRegistrationTask() cascade.CascadeTask { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCascadeRegistrationTask") - ret0, _ := ret[0].(cascade.CascadeTask) - return ret0 -} - -// NewCascadeRegistrationTask indicates an expected call of NewCascadeRegistrationTask. -func (mr *MockCascadeServiceFactoryMockRecorder) NewCascadeRegistrationTask() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCascadeRegistrationTask", reflect.TypeOf((*MockCascadeServiceFactory)(nil).NewCascadeRegistrationTask)) -} - -// MockCascadeTask is a mock of CascadeTask interface. -type MockCascadeTask struct { - ctrl *gomock.Controller - recorder *MockCascadeTaskMockRecorder -} - -// MockCascadeTaskMockRecorder is the mock recorder for MockCascadeTask. -type MockCascadeTaskMockRecorder struct { - mock *MockCascadeTask -} - -// NewMockCascadeTask creates a new mock instance. -func NewMockCascadeTask(ctrl *gomock.Controller) *MockCascadeTask { - mock := &MockCascadeTask{ctrl: ctrl} - mock.recorder = &MockCascadeTaskMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeTask) EXPECT() *MockCascadeTaskMockRecorder { - return m.recorder -} - -// CleanupDownload mocks base method. -func (m *MockCascadeTask) CleanupDownload(ctx context.Context, actionID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CleanupDownload", ctx, actionID) - ret0, _ := ret[0].(error) - return ret0 -} - -// CleanupDownload indicates an expected call of CleanupDownload. -func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDownload", reflect.TypeOf((*MockCascadeTask)(nil).CleanupDownload), ctx, actionID) -} - -// Download mocks base method. -func (m *MockCascadeTask) Download(ctx context.Context, req *cascade.DownloadRequest, send func(*cascade.DownloadResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Download", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Download indicates an expected call of Download. 
-func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockCascadeTask)(nil).Download), ctx, req, send) -} - -// Register mocks base method. -func (m *MockCascadeTask) Register(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. -func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockCascadeTask)(nil).Register), ctx, req, send) -} diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go deleted file mode 100644 index 866420aa..00000000 --- a/supernode/services/cascade/register.go +++ /dev/null @@ -1,191 +0,0 @@ -package cascade - -import ( - "context" - "os" - - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// RegisterRequest contains parameters for upload request -type RegisterRequest struct { - TaskID string - ActionID string - DataHash []byte - DataSize int - FilePath string -} - -// RegisterResponse contains the result of upload -type RegisterResponse struct { - EventType SupernodeEventType - Message string - TxHash string -} - -// Register processes the upload request for cascade input data. -// 1- Fetch & validate action (it should be a cascade action registered on the chain) -// 2- Ensure this super-node is eligible to process the action (should be in the top supernodes list for the action block height) -// 3- Get the cascade metadata from the action: it contains the data hash and the signatures -// -// Assuming data hash is a base64 encoded string of blake3 hash of the data -// The signatures field is: b64(JSON(Layout)).Signature where Layout is codec.Layout -// The layout is a JSON object that contains the metadata of the data -// -// 4- Verify the data hash (the data hash should match the one in the action ticket) - again, hash function should be blake3 -// 5- Generate Symbols with codec (RQ-Go Library) (the data should be encoded using the codec) -// 6- Extract the layout and the signature from Step 3. 
Verify the signature using the creator's public key (creator address is in the action) -// 7- Generate RQ-ID files from the layout that we generated locally and then match those with the ones in the action -// 8- Verify the IDs in the layout and the metadata (the IDs should match the ones in the action) -// 9- Store the artefacts in P2P Storage (the redundant metadata files and the symbols from the symbols dir) -func (task *CascadeRegistrationTask) Register( - ctx context.Context, - req *RegisterRequest, - send func(resp *RegisterResponse) error, -) (err error) { - // Seed correlation ID and origin so logs across layers can be joined and filtered - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - } - - fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "register: request", fields) - - // Ensure task status and resources are finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - // Always attempt to remove the uploaded file path - defer func() { - if req != nil && req.FilePath != "" { - if remErr := os.RemoveAll(req.FilePath); remErr != nil { - logtrace.Warn(ctx, "Failed to remove uploaded file", fields) - } else { - logtrace.Debug(ctx, "Uploaded file cleaned up", fields) - } - } - }() - - /* 1. Fetch & validate action -------------------------------------------------- */ - action, err := task.fetchAction(ctx, req.ActionID, fields) - if err != nil { - return err - } - fields[logtrace.FieldBlockHeight] = action.BlockHeight - fields[logtrace.FieldCreator] = action.Creator - fields[logtrace.FieldStatus] = action.State - fields[logtrace.FieldPrice] = action.Price - logtrace.Info(ctx, "register: action fetched", fields) - task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - - /* 2. Verify action fee -------------------------------------------------------- */ - if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { - return err - } - logtrace.Info(ctx, "register: fee verified", fields) - task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - - /* 3. Ensure this super-node is eligible -------------------------------------- */ - fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress - if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { - return err - } - logtrace.Info(ctx, "register: top supernode confirmed", fields) - task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - - /* 4. Decode cascade metadata -------------------------------------------------- */ - cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) - if err != nil { - return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) - } - logtrace.Info(ctx, "register: metadata decoded", fields) - task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - - /* 5. 
Verify data hash --------------------------------------------------------- */ - if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { - return err - } - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) - logtrace.Info(ctx, "register: data hash matched", fields) - task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - - /* 6. Encode the raw data ------------------------------------------------------ */ - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) - if err != nil { - return err - } - // Promote to Info and include symbols directory for quick visibility - fields["symbols_dir"] = encResp.SymbolsDir - logtrace.Info(ctx, "register: input encoded", fields) - task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - - /* 7. Signature verification + layout decode ---------------------------------- */ - layout, signature, err := task.verifySignatureAndDecodeLayout( - ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields, - ) - if err != nil { - return err - } - logtrace.Info(ctx, "register: signature verified", fields) - task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - - /* 8. Generate RQ-ID files ----------------------------------------------------- */ - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) - if err != nil { - return err - } - // Include count of ID files generated for visibility - fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) - logtrace.Info(ctx, "register: rqid files generated", fields) - task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) - - /* 9. Consistency checks ------------------------------------------------------- */ - if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { - return task.wrapErr(ctx, "failed to verify IDs", err, fields) - } - logtrace.Info(ctx, "register: rqids validated", fields) - task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - - /* 10. Simulate finalize to avoid storing artefacts if it would fail ---------- */ - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize simulation failed", fields) - // Emit explicit simulation failure event for client visibility - task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } - logtrace.Info(ctx, "register: finalize simulation passed", fields) - // Transmit as a standard event so SDK can propagate it (dedicated type) - task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - - /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. P2P interfaces return error only; - // metrics are summarized at the cascade layer and emitted via event. 
- if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { - return err - } - // Emit artefacts stored event (metrics payload removed; logs preserved) - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) - - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize action error", fields) - return task.wrapErr(ctx, "failed to finalize action", err, fields) - } - txHash := resp.TxResponse.TxHash - fields[logtrace.FieldTxHash] = txHash - logtrace.Info(ctx, "register: action finalized", fields) - task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) - - return nil -} diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go deleted file mode 100644 index 6f56791a..00000000 --- a/supernode/services/cascade/register_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package cascade_test - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "os" - "testing" - - sdkmath "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - codecpkg "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - sdk "github.com/cosmos/cosmos-sdk/types" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" - "github.com/cosmos/gogoproto/proto" - "lukechampine.com/blake3" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestCascadeRegistrationTask_Register(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Setup input file - tmpFile, err := os.CreateTemp("", "cascade-test-input") - assert.NoError(t, err) - - _, _ = tmpFile.WriteString("mock data") - - err = tmpFile.Close() // ✅ ensure it's flushed to disk - assert.NoError(t, err) - - rawHash, b64Hash := blake3HashRawAndBase64(t, tmpFile.Name()) - - tests := []struct { - name string - setupMocks func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) - expectedError string - expectedEvents int - }{ - { - name: "happy path", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - // 2. Top SNs - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - { - SupernodeAccount: "lumera1abcxyz", // must match task.config.SupernodeAccountAddress - }, - }, - }, nil) - - // 3. 
Signature verification - layout signature on layout file - // Expect two verification calls: creator signature and layout signature - lc.EXPECT(). - Verify(gomock.Any(), "creator1", gomock.Any(), gomock.Any()). - Return(nil). - Times(2) - - // 4. Simulate finalize should pass - lc.EXPECT(). - SimulateFinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.SimulateResponse{}, nil) - - // 5. Finalize - lc.EXPECT(). - FinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.BroadcastTxResponse{TxResponse: &sdk.TxResponse{TxHash: "tx123"}}, nil) - - // 6. Params (if used in fee check) - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - // 7. Encode input - codec.EXPECT(). - EncodeInput(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(adaptors.EncodeResult{ - SymbolsDir: "/tmp", - Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, - }, nil) - - // 8. Store artefacts (no metrics returned; recorded centrally) - p2p.EXPECT(). - StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil) - }, - expectedError: "", - expectedEvents: 12, - }, - { - name: "get-action fails", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, _ *cascadeadaptormocks.MockCodecService, _ *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(nil, assert.AnError) - }, - expectedError: "assert.AnError general error", - expectedEvents: 0, - }, - { - name: "invalid data hash mismatch", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata("some-other-hash", t), // ⛔ incorrect hash - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "lumera1abcxyz"}, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - }, - expectedError: "data hash doesn't match", - expectedEvents: 5, // up to metadata decoded - }, - { - name: "fee too low", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(50), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "100"}, nil) - - }, - expectedError: "action fee is too low", - expectedEvents: 2, // until fee check - }, - { - name: "supernode not in top list", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). 
- Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "other-supernode"}, - }, - }, nil) - }, - expectedError: "not eligible supernode", - expectedEvents: 2, // fails after fee verified - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - - tt.setupMocks(mockLumera, mockCodec, mockP2P) - - config := &cascade.Config{Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService( - config, - nil, nil, nil, nil, - ) - - service.LumeraClient = mockLumera - service.P2P = mockP2P - service.RQ = mockCodec - // Inject mocks for adaptors - task := cascade.NewCascadeRegistrationTask(service) - - req := &cascade.RegisterRequest{ - TaskID: "task1", - ActionID: "action123", - DataHash: rawHash, - DataSize: 10240, - FilePath: tmpFile.Name(), - } - - var events []cascade.RegisterResponse - err := task.Register(context.Background(), req, func(resp *cascade.RegisterResponse) error { - events = append(events, *resp) - return nil - }) - - if tt.expectedError != "" { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Len(t, events, tt.expectedEvents) - } - }) - } -} - -func encodedCascadeMetadata(hash string, t *testing.T) []byte { - t.Helper() - - // Fake layout signature for new index file format - fakeLayoutSig := base64.StdEncoding.EncodeToString([]byte("fakelayoutsignature")) - - // Create index file structure - indexFile := map[string]any{ - "layout_ids": []string{"layout_id_1", "layout_id_2"}, - "layout_signature": fakeLayoutSig, - } - indexFileJSON, _ := json.Marshal(indexFile) - fakeIndexFile := base64.StdEncoding.EncodeToString(indexFileJSON) - - // Fake creators signature - this is what the chain uses for index ID generation - fakeCreatorsSig := base64.StdEncoding.EncodeToString([]byte("fakecreatorssignature")) - - metadata := &actiontypes.CascadeMetadata{ - DataHash: hash, - FileName: "file.txt", - RqIdsIc: 2, - RqIdsMax: 4, - RqIdsIds: []string{"id1", "id2"}, - Signatures: fakeIndexFile + "." 
+ fakeCreatorsSig, - } - - bytes, err := proto.Marshal(metadata) - if err != nil { - t.Fatalf("failed to marshal CascadeMetadata: %v", err) - } - - return bytes -} - -func blake3HashRawAndBase64(t *testing.T, path string) ([]byte, string) { - t.Helper() - - data, err := os.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - hash := blake3.Sum256(data) - raw := hash[:] - b64 := base64.StdEncoding.EncodeToString(raw) - return raw, b64 -} - -func decodeHexOrDie(hexStr string) []byte { - bz, err := hex.DecodeString(hexStr) - if err != nil { - panic(err) - } - return bz -} diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go deleted file mode 100644 index 3619d7d1..00000000 --- a/supernode/services/cascade/service.go +++ /dev/null @@ -1,47 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" -) - -type CascadeService struct { - *base.SuperNodeService - config *Config - - LumeraClient adaptors.LumeraClient - P2P adaptors.P2PService - RQ adaptors.CodecService -} - -// Compile-time checks to ensure CascadeService implements required interfaces -var _ CascadeServiceFactory = (*CascadeService)(nil) - -// NewCascadeRegistrationTask creates a new task for cascade registration -func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - return task -} - -// Run starts the service -func (service *CascadeService) Run(ctx context.Context) error { - return service.RunHelper(ctx, service.config.SupernodeAccountAddress, logPrefix) -} - -// NewCascadeService returns a new CascadeService instance -func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } -} diff --git a/supernode/services/cascade/service_test.go b/supernode/services/cascade/service_test.go deleted file mode 100644 index bc2998ad..00000000 --- a/supernode/services/cascade/service_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package cascade_test - -import ( - "context" - "testing" - "time" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestNewCascadeService(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = 
mockP2P - - assert.NotNil(t, service) - assert.NotNil(t, service.LumeraClient) - assert.NotNil(t, service.P2P) - assert.NotNil(t, service.RQ) -} - -func TestNewCascadeRegistrationTask(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = mockP2P - - task := cascade.NewCascadeRegistrationTask(service) - assert.NotNil(t, task) - - go func() { - service.Worker.AddTask(task) - }() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - - err := service.RunHelper(ctx, "node-id", "prefix") - assert.NoError(t, err) -} diff --git a/supernode/services/cascade/status.go b/supernode/services/cascade/status.go deleted file mode 100644 index 64772443..00000000 --- a/supernode/services/cascade/status.go +++ /dev/null @@ -1,19 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// StatusResponse represents the status response for cascade service -type StatusResponse = supernode.StatusResponse - -// GetStatus delegates to the common supernode status service -func (service *CascadeService) GetStatus(ctx context.Context) (StatusResponse, error) { - // Create a status service - // Pass nil for optional dependencies (P2P, lumera client, and config) - // as cascade service doesn't have access to them in this context - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - return statusService.GetStatus(ctx, false) -} diff --git a/supernode/services/cascade/status_test.go b/supernode/services/cascade/status_test.go deleted file mode 100644 index 1a15c694..00000000 --- a/supernode/services/cascade/status_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package cascade - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/stretchr/testify/assert" -) - -func TestGetStatus(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - taskCount int - expectErr bool - }{ - {name: "no tasks", taskCount: 0, expectErr: false}, - {name: "one task", taskCount: 1, expectErr: false}, - {name: "multiple tasks", taskCount: 3, expectErr: false}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup service and worker - service := &CascadeService{ - SuperNodeService: base.NewSuperNodeService(nil), - } - - go func() { - service.RunHelper(ctx, "node-id", "prefix") - }() - - // Register tasks - for i := 0; i < tt.taskCount; i++ { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - } - - // Call GetStatus from service - resp, err := service.GetStatus(ctx) - if tt.expectErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - - // Version check - assert.NotEmpty(t, resp.Version) - - // Uptime check - assert.True(t, resp.UptimeSeconds >= 0) - - // CPU checks - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, resp.Resources.CPU.Cores >= 0) - - // Memory checks (now in GB) - assert.True(t, 
resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsedGB <= resp.Resources.Memory.TotalGB) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0 && resp.Resources.Memory.UsagePercent <= 100) - - // Hardware summary check - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Storage checks - should have default root filesystem - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Registered services is populated at server layer; cascade service returns none - assert.Empty(t, resp.RegisteredServices) - - // Check new fields have default values (since service doesn't have access to P2P/lumera/config) - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - }) - } -} diff --git a/supernode/services/cascade/task.go b/supernode/services/cascade/task.go deleted file mode 100644 index 43eb1181..00000000 --- a/supernode/services/cascade/task.go +++ /dev/null @@ -1,56 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" -) - -// CascadeRegistrationTask is the task for cascade registration -type CascadeRegistrationTask struct { - *CascadeService - - *base.SuperNodeTask - - Asset *files.File - dataHash string - creatorSignature []byte -} - -const ( - logPrefix = "cascade" -) - -// Compile-time check to ensure CascadeRegistrationTask implements CascadeTask interface -var _ CascadeTask = (*CascadeRegistrationTask)(nil) - -// Run starts the task -func (task *CascadeRegistrationTask) Run(ctx context.Context) error { - return task.RunHelper(ctx, task.removeArtifacts) -} - -// removeArtifacts cleans up any files created during processing -func (task *CascadeRegistrationTask) removeArtifacts() { - task.RemoveFile(task.Asset) -} - -// NewCascadeRegistrationTask returns a new Task instance -func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { - task := &CascadeRegistrationTask{ - SuperNodeTask: base.NewSuperNodeTask(logPrefix), - CascadeService: service, - } - - return task -} - -func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { - _ = send(&RegisterResponse{ - EventType: eventType, - Message: msg, - TxHash: txHash, - }) - - return -} diff --git a/supernode/services/common/base/supernode_service.go b/supernode/services/common/base/supernode_service.go deleted file mode 100644 index 424556b0..00000000 --- a/supernode/services/common/base/supernode_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package base - -import ( - "context" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// SuperNodeServiceInterface common interface for Services -type SuperNodeServiceInterface interface { - RunHelper(ctx context.Context) error - NewTask() task.Task - Task(id string) task.Task -} - -// SuperNodeService common "class" for Services -type SuperNodeService struct { - *task.Worker - P2PClient p2p.Client -} - -// run starts task -func (service *SuperNodeService) run(ctx 
context.Context, nodeID string, prefix string) error { - ctx = logtrace.CtxWithCorrelationID(ctx, prefix) - - if nodeID == "" { - return errors.New("PastelID is not specified in the config file") - } - - group, ctx := errgroup.WithContext(ctx) - group.Go(func() error { - return service.Worker.Run(ctx) - }) - - return group.Wait() -} - -// RunHelper common code for Service runner -func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, prefix string) error { - for { - select { - case <-ctx.Done(): - logtrace.Error(ctx, "context done - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - case <-time.After(5 * time.Second): - if err := service.run(ctx, nodeID, prefix); err != nil { - service.Worker = task.NewWorker() - logtrace.Error(ctx, "Service run failed, retrying", logtrace.Fields{logtrace.FieldModule: "supernode", logtrace.FieldError: err.Error()}) - } else { - logtrace.Debug(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - } - } - } -} - -// NewSuperNodeService creates SuperNodeService -func NewSuperNodeService( - p2pClient p2p.Client, -) *SuperNodeService { - return &SuperNodeService{ - Worker: task.NewWorker(), - P2PClient: p2pClient, - } -} diff --git a/supernode/services/common/base/supernode_task.go b/supernode/services/common/base/supernode_task.go deleted file mode 100644 index 2908558d..00000000 --- a/supernode/services/common/base/supernode_task.go +++ /dev/null @@ -1,71 +0,0 @@ -package base - -import ( - "context" - "fmt" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// TaskCleanerFunc pointer to func that removes artefacts -type TaskCleanerFunc func() - -// SuperNodeTask base "class" for Task -type SuperNodeTask struct { - task.Task - - LogPrefix string -} - -// RunHelper common code for Task runner -func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { - ctx = task.context(ctx) - logtrace.Debug(ctx, "Start task", logtrace.Fields{}) - defer logtrace.Debug(ctx, "Task canceled", logtrace.Fields{}) - defer task.Cancel() - - task.SetStatusNotifyFunc(func(status *state.Status) { - logtrace.Debug(ctx, "States updated", logtrace.Fields{"status": status.String()}) - }) - - defer clean() - - err := task.RunAction(ctx) - - // Update task status based on completion result - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - - return err -} - -func (task *SuperNodeTask) context(ctx context.Context) context.Context { - return logtrace.CtxWithCorrelationID(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID())) -} - -// RemoveFile removes file from FS (TODO: move to gonode.common) -func (task *SuperNodeTask) RemoveFile(file *files.File) { - if file != nil { - logtrace.Debug(context.Background(), "remove file", logtrace.Fields{"filename": file.Name()}) - if err := file.Remove(); err != nil { - logtrace.Debug(context.Background(), "remove file failed", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } -} - -// NewSuperNodeTask returns a new Task instance. 
-func NewSuperNodeTask(logPrefix string) *SuperNodeTask { - snt := &SuperNodeTask{ - Task: task.New(common.StatusTaskStarted), - LogPrefix: logPrefix, - } - - return snt -} diff --git a/supernode/services/common/base/supernode_task_test.go b/supernode/services/common/base/supernode_task_test.go deleted file mode 100644 index 9e108f59..00000000 --- a/supernode/services/common/base/supernode_task_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package base - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewSuperNodeTask(t *testing.T) { - task := NewSuperNodeTask("testprefix") - assert.NotNil(t, task) - assert.Equal(t, "testprefix", task.LogPrefix) -} - -func TestSuperNodeTask_RunHelper(t *testing.T) { - called := false - cleaner := func() { - called = true - } - - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Run the helper in a goroutine - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - err := snt.RunHelper(ctx, cleaner) - assert.NoError(t, err) - }() - - // Give the RunHelper some time to start and block on actionCh - time.Sleep(10 * time.Millisecond) - - // Submit dummy action to allow RunAction to proceed - done := snt.NewAction(func(ctx context.Context) error { - return nil - }) - - <-done // wait for action to complete - - snt.CloseActionCh() // close to allow RunAction to return - wg.Wait() // wait for RunHelper to exit - - assert.True(t, called) -} - -func TestSuperNodeTask_RunHelper_WithError(t *testing.T) { - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var wg sync.WaitGroup - wg.Add(1) - - var runErr error - go func() { - defer wg.Done() - runErr = snt.RunHelper(ctx, func() {}) - }() - - // Give RunHelper time to start - time.Sleep(10 * time.Millisecond) - - done := snt.NewAction(func(ctx context.Context) error { - return fmt.Errorf("fail") - }) - - <-done // wait for the action to complete - snt.CloseActionCh() // allow RunAction to exit - wg.Wait() // wait for RunHelper to return - - assert.EqualError(t, runErr, "fail") -} diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go deleted file mode 100644 index 684d1fd1..00000000 --- a/supernode/services/common/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package common - -const ( - defaultNumberSuperNodes = 10 -) - -// Config contains common configuration of the services. 
-type Config struct { - SupernodeAccountAddress string - SupernodeIPAddress string - NumberSuperNodes int -} - -// NewConfig returns a new Config instance -func NewConfig() *Config { - return &Config{ - NumberSuperNodes: defaultNumberSuperNodes, - } -} diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go deleted file mode 100644 index 1e707e03..00000000 --- a/supernode/services/common/supernode/service.go +++ /dev/null @@ -1,262 +0,0 @@ -package supernode - -import ( - "context" - "fmt" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/config" -) - -// Version is the supernode version, set by the main application -var Version = "dev" - -// SupernodeStatusService provides centralized status information -// by collecting system metrics and aggregating task information from registered services -type SupernodeStatusService struct { - metrics *MetricsCollector // System metrics collector for CPU and memory stats - storagePaths []string // Paths to monitor for storage metrics - startTime time.Time // Service start time for uptime calculation - p2pService p2p.Client // P2P service for network information - lumeraClient lumera.Client // Lumera client for blockchain queries - config *config.Config // Supernode configuration -} - -// NewSupernodeStatusService creates a new supernode status service instance -func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { - return &SupernodeStatusService{ - metrics: NewMetricsCollector(), - storagePaths: []string{"/"}, // Default to monitoring root filesystem - startTime: time.Now(), - p2pService: p2pService, - lumeraClient: lumeraClient, - config: cfg, - } -} - -// GetStatus returns the current system status including all registered services -// This method collects CPU metrics, memory usage, and task information from all providers -func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (StatusResponse, error) { - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldModule: "SupernodeStatusService", - } - logtrace.Debug(ctx, "status request received", fields) - - var resp StatusResponse - resp.Version = Version - - // Calculate uptime - resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) - - // Collect CPU metrics - cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) - if err != nil { - return resp, err - } - resp.Resources.CPU.UsagePercent = cpuUsage - - // Get CPU cores - cpuCores, err := s.metrics.GetCPUCores(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) - cpuCores = 0 - } - resp.Resources.CPU.Cores = cpuCores - - // Collect memory metrics - memTotal, memUsed, memAvailable, memUsedPerc, err := s.metrics.CollectMemoryMetrics(ctx) - if err != nil { - return resp, err - } - - // Convert to GB - const bytesToGB = 1024 * 1024 * 1024 - resp.Resources.Memory.TotalGB = float64(memTotal) / bytesToGB - resp.Resources.Memory.UsedGB = float64(memUsed) / bytesToGB - resp.Resources.Memory.AvailableGB = float64(memAvailable) / bytesToGB - resp.Resources.Memory.UsagePercent = 
memUsedPerc - - // Generate hardware summary - if cpuCores > 0 && resp.Resources.Memory.TotalGB > 0 { - resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cpuCores, resp.Resources.Memory.TotalGB) - } - - // Collect storage metrics - resp.Resources.Storage = s.metrics.CollectStorageMetrics(ctx, s.storagePaths) - - // Task tracking removed; RegisteredServices populated at server layer - - // Initialize network info - resp.Network = NetworkInfo{ - PeersCount: 0, - PeerAddresses: []string{}, - } - - // Prepare P2P metrics container (always present in response) - metrics := P2PMetrics{ - NetworkHandleMetrics: map[string]HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []BanEntry{}, - } - - // Collect P2P network information and metrics (fill when available and requested) - if includeP2PMetrics && s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - if peersCount, ok := dhtStats["peers_count"].(int); ok { - resp.Network.PeersCount = int32(peersCount) - } - - // Extract peer addresses - if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { - resp.Network.PeerAddresses = make([]string, 0, len(peers)) - for _, peer := range peers { - // Format peer address as "ID@IP:Port" - peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) - resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) - } - } else { - resp.Network.PeerAddresses = []string{} - } - } - - // Disk info - if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { - metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} - } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { - metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} - } - - // Ban list - if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { - for _, b := range bans { - metrics.BanList = append(metrics.BanList, BanEntry{ - ID: b.ID, - IP: b.IP, - Port: uint32(b.Port), - Count: int32(b.Count), - CreatedAtUnix: b.CreatedAt.Unix(), - AgeSeconds: int64(b.Age.Seconds()), - }) - } - } - - // Conn pool metrics - if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { - for k, v := range pool { - metrics.ConnPoolMetrics[k] = v - } - } - - // DHT metrics and database/network counters live inside dht map - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - // Database - if db, ok := dhtStats["database"].(map[string]interface{}); ok { - var sizeMB float64 - if v, ok := db["p2p_db_size"].(float64); ok { - sizeMB = v - } - var recs int64 - switch v := db["p2p_db_records_count"].(type) { - case int: - recs = int64(v) - case int64: - recs = v - case float64: - recs = int64(v) - } - metrics.Database = DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} - } - - // Network handle metrics - if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { - for k, c := range nhm { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { - for k, vi := range nhmI { - if c, ok := vi.(kademlia.HandleCounters); ok { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, 
Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } - } - - // Detailed recent per-request lists removed from API mapping - } - - // DHT rolling metrics snapshot is attached at top-level under dht_metrics - if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { - // Store success - for _, p := range snap.StoreSuccessRecent { - metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ - TimeUnix: p.Time.Unix(), - Requests: int32(p.Requests), - Successful: int32(p.Successful), - SuccessRate: p.SuccessRate, - }) - } - // Batch retrieve - for _, p := range snap.BatchRetrieveRecent { - metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ - TimeUnix: p.Time.Unix(), - Keys: int32(p.Keys), - Required: int32(p.Required), - FoundLocal: int32(p.FoundLocal), - FoundNetwork: int32(p.FoundNet), - DurationMS: p.Duration.Milliseconds(), - }) - } - metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips - metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements - } - } - } - - // Always include metrics (may be empty if not available) - resp.P2PMetrics = metrics - - // Calculate rank from top supernodes - if s.lumeraClient != nil && s.config != nil { - // Get current block height - blockInfo, err := s.lumeraClient.Node().GetLatestBlock(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get latest block", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - // Get top supernodes for current block - topNodes, err := s.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, uint64(blockInfo.SdkBlock.Header.Height)) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get top supernodes", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - // Find our rank - for idx, node := range topNodes.Supernodes { - if node.SupernodeAccount == s.config.SupernodeConfig.Identity { - resp.Rank = int32(idx + 1) // Rank starts from 1 - break - } - } - } - } - } - - if s.config != nil && s.lumeraClient != nil { - if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { - resp.IPAddress = supernodeInfo.LatestAddress - } - - } - - return resp, nil -} diff --git a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go deleted file mode 100644 index 2a499156..00000000 --- a/supernode/services/common/supernode/service_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package supernode - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSupernodeStatusService(t *testing.T) { - ctx := context.Background() - - t.Run("empty service", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have version info - assert.NotEmpty(t, resp.Version) - - // Should have uptime - assert.True(t, resp.UptimeSeconds >= 0) - - // Should have CPU and Memory info - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, resp.Resources.CPU.Cores >= 0) - assert.True(t, resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // 
Should have hardware summary if cores and memory are available - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Should have storage info (default root filesystem) - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Registered services now populated at server layer; status service leaves empty - assert.Empty(t, resp.RegisteredServices) - - // Should have default values for new fields - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - }) -} diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go deleted file mode 100644 index 39579502..00000000 --- a/supernode/services/common/supernode/types.go +++ /dev/null @@ -1,111 +0,0 @@ -package supernode - -// StatusResponse represents the complete system status information -// with clear organization of resources and services -type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics P2PMetrics // Detailed P2P metrics snapshot -} - -// Resources contains system resource metrics -type Resources struct { - CPU CPUInfo // CPU usage information - Memory MemoryInfo // Memory usage information - Storage []StorageInfo // Storage volumes information - HardwareSummary string // Formatted hardware summary (e.g., "8 cores / 32GB RAM") -} - -// CPUInfo contains CPU usage metrics -type CPUInfo struct { - UsagePercent float64 // CPU usage percentage (0-100) - Cores int32 // Number of CPU cores -} - -// MemoryInfo contains memory usage metrics -type MemoryInfo struct { - TotalGB float64 // Total memory in GB - UsedGB float64 // Used memory in GB - AvailableGB float64 // Available memory in GB - UsagePercent float64 // Memory usage percentage (0-100) -} - -// StorageInfo contains storage metrics for a specific path -type StorageInfo struct { - Path string // Storage path being monitored - TotalBytes uint64 // Total storage in bytes - UsedBytes uint64 // Used storage in bytes - AvailableBytes uint64 // Available storage in bytes - UsagePercent float64 // Storage usage percentage (0-100) -} - -// NetworkInfo contains P2P network information -type NetworkInfo struct { - PeersCount int32 // Number of connected peers in P2P network - PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) -} - -// P2PMetrics mirrors the proto P2P metrics for status API -type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus -} - -type StoreSuccessPoint struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 -} - -type BatchRetrievePoint struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 -} - -type DhtMetrics struct { - StoreSuccessRecent []StoreSuccessPoint - BatchRetrieveRecent []BatchRetrievePoint - HotPathBannedSkips int64 - 
HotPathBanIncrements int64 -} - -type HandleCounters struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 -} - -type BanEntry struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 -} - -type DatabaseStats struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 -} - -type DiskStatus struct { - AllMB float64 - UsedMB float64 - FreeMB float64 -} diff --git a/supernode/services/common/task_status.go b/supernode/services/common/task_status.go deleted file mode 100644 index 22b63b7a..00000000 --- a/supernode/services/common/task_status.go +++ /dev/null @@ -1,51 +0,0 @@ -package common - -// List of task statuses. -const ( - StatusTaskStarted Status = iota - // Mode - StatusPrimaryMode - StatusSecondaryMode - - // Process - StatusConnected - - // Final - StatusTaskCanceled - StatusTaskCompleted -) - -var statusNames = map[Status]string{ - StatusTaskStarted: "Task started", - StatusTaskCanceled: "Task Canceled", - StatusTaskCompleted: "Task Completed", -} - -// Status represents status of the task -type Status byte - -func (status Status) String() string { - if name, ok := statusNames[status]; ok { - return name - } - return "" -} - -// IsFinal returns true if the status is the final. -func (status Status) IsFinal() bool { - return status == StatusTaskCanceled || status == StatusTaskCompleted -} - -// IsFailure returns true if the task failed due to an error -func (status Status) IsFailure() bool { - return status == StatusTaskCanceled -} - -// StatusNames returns a sorted list of status names. -func StatusNames() []string { - list := make([]string, len(statusNames)) - for i, name := range statusNames { - list[i] = name - } - return list -} diff --git a/supernode/services/common/task_status_test.go b/supernode/services/common/task_status_test.go deleted file mode 100644 index b9853120..00000000 --- a/supernode/services/common/task_status_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStatus_String(t *testing.T) { - tests := []struct { - status Status - expected string - }{ - {StatusTaskStarted, "Task started"}, - {StatusTaskCanceled, "Task Canceled"}, - {StatusTaskCompleted, "Task Completed"}, - {StatusPrimaryMode, ""}, - {StatusSecondaryMode, ""}, - {StatusConnected, ""}, - {Status(255), ""}, // unknown status - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.String(), "Status.String() should match expected name") - } -} - -func TestStatus_IsFinal(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - {StatusPrimaryMode, false}, - {StatusSecondaryMode, false}, - {StatusConnected, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, true}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFinal(), "Status.IsFinal() mismatch") - } -} - -func TestStatus_IsFailure(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, false}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFailure(), "Status.IsFailure() mismatch") - } -} diff --git a/supernode/services/verifier/interface.go b/supernode/services/verifier/interface.go deleted file mode 100644 index 7414201a..00000000 --- a/supernode/services/verifier/interface.go +++ /dev/null @@ -1,55 +0,0 @@ -package verifier - -import ( - "context" - "strings" -) - -// 
ConfigVerifierService defines the interface for config verification service -type ConfigVerifierService interface { - // VerifyConfig performs comprehensive config validation against chain - VerifyConfig(ctx context.Context) (*VerificationResult, error) -} - -// VerificationResult contains the results of config verification -type VerificationResult struct { - Valid bool `json:"valid"` - Errors []ConfigError `json:"errors,omitempty"` - Warnings []ConfigError `json:"warnings,omitempty"` -} - -// ConfigError represents a configuration validation error or warning -type ConfigError struct { - Field string `json:"field"` - Expected string `json:"expected,omitempty"` - Actual string `json:"actual,omitempty"` - Message string `json:"message"` -} - -// IsValid returns true if all verifications passed -func (vr *VerificationResult) IsValid() bool { - return vr.Valid && len(vr.Errors) == 0 -} - -// HasWarnings returns true if there are any warnings -func (vr *VerificationResult) HasWarnings() bool { - return len(vr.Warnings) > 0 -} - -// Summary returns a human-readable summary of verification results -func (vr *VerificationResult) Summary() string { - if vr.IsValid() && !vr.HasWarnings() { - return "✓ Config verification successful" - } - - var summary string - for _, err := range vr.Errors { - summary += "✗ " + err.Message + "\n" - } - - for _, warn := range vr.Warnings { - summary += "⚠ " + warn.Message + "\n" - } - - return strings.TrimSuffix(summary, "\n") -} diff --git a/supernode/services/verifier/verifier.go b/supernode/services/verifier/verifier.go deleted file mode 100644 index 68a2ae77..00000000 --- a/supernode/services/verifier/verifier.go +++ /dev/null @@ -1,222 +0,0 @@ -package verifier - -import ( - "context" - "fmt" - "net" - - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// ConfigVerifier implements ConfigVerifierService -type ConfigVerifier struct { - config *config.Config - lumeraClient lumera.Client - keyring keyring.Keyring -} - -// NewConfigVerifier creates a new config verifier service -func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { - return &ConfigVerifier{ - config: cfg, - lumeraClient: client, - keyring: kr, - } -} - -// VerifyConfig performs comprehensive config validation against chain -func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{ - "identity": cv.config.SupernodeConfig.Identity, - "key_name": cv.config.SupernodeConfig.KeyName, - "p2p_port": cv.config.P2PConfig.Port, - }) - - // Check 1: Verify keyring contains the key - if err := cv.checkKeyExists(result); err != nil { - return result, err - } - - // Check 2: Verify key resolves to correct identity - if err := cv.checkIdentityMatches(result); err != nil { - return result, err - } - - // If keyring checks failed, don't proceed with chain queries - if !result.IsValid() { - return result, nil - } - - // Check 3: Query chain for supernode registration - supernodeInfo, err := cv.checkSupernodeExists(ctx, result) - if err != nil { - return result, 
err - } - - // If supernode doesn't exist, don't proceed with field comparisons - if supernodeInfo == nil { - return result, nil - } - - // Check 4: Verify supernode state is active - cv.checkSupernodeState(result, supernodeInfo) - - // Check 5: Verify all required ports are available - cv.checkPortsAvailable(result) - - logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{ - "valid": result.IsValid(), - "errors": len(result.Errors), - "warnings": len(result.Warnings), - }) - - return result, nil -} - -// checkKeyExists verifies the configured key exists in keyring -func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { - _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "key_name", - Actual: cv.config.SupernodeConfig.KeyName, - Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName), - }) - } - return nil -} - -// checkIdentityMatches verifies key resolves to configured identity -func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { - keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - // Already handled in checkKeyExists - return nil - } - - pubKey, err := keyInfo.GetPubKey() - if err != nil { - return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) - } - - addr := sdk.AccAddress(pubKey.Address()) - if addr.String() != cv.config.SupernodeConfig.Identity { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "identity", - Expected: addr.String(), - Actual: cv.config.SupernodeConfig.Identity, - Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity), - }) - } - return nil -} - -// checkSupernodeExists queries chain for supernode registration -func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { - sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "registration", - Actual: "not_registered", - Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity), - }) - return nil, nil - } - return sn, nil -} - -// checkP2PPortMatches compares config P2P port with chain -func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) - chainPort := supernodeInfo.P2PPort - - if chainPort != "" && chainPort != configPort { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Expected: chainPort, - Actual: configPort, - Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort), - }) - } -} - -// checkSupernodeState verifies supernode is in active state -func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "state", - Expected: "SUPERNODE_STATE_ACTIVE", - Actual: 
supernodeInfo.CurrentState, - Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState), - }) - } -} - -// checkPortsAvailable verifies that all required ports are available for binding -func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { - // Check supernode port - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "supernode_port", - Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port), - }) - } - - // Check P2P port - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port), - }) - } - - // Check gateway port (use configured port or default port 8002) - gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) - if gatewayPort == 0 { - gatewayPort = 8002 // Default gateway port (same as gateway.DefaultGatewayPort) - } - - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "gateway_port", - Actual: fmt.Sprintf("%d", gatewayPort), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", gatewayPort), - }) - } -} - -// isPortAvailable checks if a port is available for binding -func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { - address := fmt.Sprintf("%s:%d", host, port) - - // Try to listen on the port - listener, err := net.Listen("tcp", address) - if err != nil { - return false // Port is not available - } - - // Close the listener immediately since we're just checking availability - listener.Close() - return true // Port is available -} diff --git a/supernode/services/verifier/verifier_test.go b/supernode/services/verifier/verifier_test.go deleted file mode 100644 index 56fd3fb7..00000000 --- a/supernode/services/verifier/verifier_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package verifier - -import ( - "net" - "strconv" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/stretchr/testify/assert" -) - -func TestNewConfigVerifier(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "192.168.1.100", - }, - P2PConfig: config.P2PConfig{ - Port: 4445, - }, - } - - // Test that NewConfigVerifier returns a non-nil service - verifier := NewConfigVerifier(cfg, nil, nil) - assert.NotNil(t, verifier) - assert.Implements(t, (*ConfigVerifierService)(nil), verifier) -} - -func TestVerificationResult_IsValid(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "valid with no errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - }, - expected: true, - }, - { - name: "invalid with errors", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - {Message: "test error"}, - }, - }, - expected: false, - }, - { - name: "valid flag true but 
has errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{ - {Message: "test error"}, - }, - }, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.IsValid()) - }) - } -} - -func TestVerificationResult_HasWarnings(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "no warnings", - result: &VerificationResult{ - Warnings: []ConfigError{}, - }, - expected: false, - }, - { - name: "has warnings", - result: &VerificationResult{ - Warnings: []ConfigError{ - {Message: "test warning"}, - }, - }, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.HasWarnings()) - }) - } -} - -func TestVerificationResult_Summary(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - contains []string - }{ - { - name: "success with no warnings", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - }, - contains: []string{"✓ Config verification successful"}, - }, - { - name: "error message", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - { - Message: "Key not found", - }, - }, - }, - contains: []string{"✗ Key not found"}, - }, - { - name: "warning message", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{ - { - Message: "Host mismatch: config=localhost, chain=192.168.1.1", - }, - }, - }, - contains: []string{"⚠ Host mismatch: config=localhost, chain=192.168.1.1"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - summary := tt.result.Summary() - for _, expected := range tt.contains { - assert.Contains(t, summary, expected) - } - }) - } -} - -func TestConfigVerifier_isPortAvailable(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - - // Test available port - available := verifier.isPortAvailable("127.0.0.1", 0) // Port 0 lets OS choose available port - assert.True(t, available) - - // Test unavailable port by creating a listener - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - // Now test that this port is not available - available = verifier.isPortAvailable("127.0.0.1", port) - assert.False(t, available) -} - -func TestConfigVerifier_checkPortsAvailable(t *testing.T) { - // Create a listener to occupy a port - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: uint16(port), // Use the occupied port - }, - P2PConfig: config.P2PConfig{ - Port: 0, // Available port - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := 
&VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for supernode port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "supernode_port", result.Errors[0].Field) - assert.Contains(t, result.Errors[0].Message, "already in use") -} - -func TestConfigVerifier_checkPortsAvailable_DefaultGatewayPort(t *testing.T) { - // Create a listener to occupy the default gateway port 8002 - listener, err := net.Listen("tcp", "127.0.0.1:8002") - assert.NoError(t, err) - defer listener.Close() - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: 4444, // Available port - GatewayPort: 0, // Not configured, should use default 8002 - }, - P2PConfig: config.P2PConfig{ - Port: 4445, // Available port - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for default gateway port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "gateway_port", result.Errors[0].Field) - assert.Equal(t, "8002", result.Errors[0].Actual) - assert.Contains(t, result.Errors[0].Message, "Port 8002 is already in use") -} diff --git a/supernode/services/common/supernode/metrics.go b/supernode/status/metrics.go similarity index 74% rename from supernode/services/common/supernode/metrics.go rename to supernode/status/metrics.go index 6c36ab35..ff29d100 100644 --- a/supernode/services/common/supernode/metrics.go +++ b/supernode/status/metrics.go @@ -1,4 +1,4 @@ -package supernode +package status import ( "context" @@ -14,19 +14,15 @@ import ( type MetricsCollector struct{} // NewMetricsCollector creates a new metrics collector instance -func NewMetricsCollector() *MetricsCollector { - return &MetricsCollector{} -} +func NewMetricsCollector() *MetricsCollector { return &MetricsCollector{} } // CollectCPUMetrics gathers CPU usage information -// Returns usage percentage as a float64 func (m *MetricsCollector) CollectCPUMetrics(ctx context.Context) (float64, error) { percentages, err := cpu.Percent(time.Second, false) if err != nil { logtrace.Error(ctx, "failed to get cpu info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - return percentages[0], nil } @@ -37,49 +33,41 @@ func (m *MetricsCollector) GetCPUCores(ctx context.Context) (int32, error) { logtrace.Error(ctx, "failed to get cpu core count", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - return int32(cores), nil } // CollectMemoryMetrics gathers memory usage information -// Returns memory statistics including total, used, available, and usage percentage func (m *MetricsCollector) CollectMemoryMetrics(ctx context.Context) (total, used, available uint64, usedPerc float64, err error) { vmem, err := mem.VirtualMemory() if err != nil { logtrace.Error(ctx, "failed to get memory info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, 0, 0, 0, err } - return vmem.Total, vmem.Used, vmem.Available, vmem.UsedPercent, nil } +// StorageInfo holds disk usage stats +type StorageInfo struct { + Path string + TotalBytes uint64 + UsedBytes uint64 + AvailableBytes uint64 + UsagePercent float64 +} + // CollectStorageMetrics gathers storage 
usage information for specified paths -// If paths is empty, it will collect metrics for the root filesystem func (m *MetricsCollector) CollectStorageMetrics(ctx context.Context, paths []string) []StorageInfo { if len(paths) == 0 { - // Default to root filesystem paths = []string{"/"} } - var storageInfos []StorageInfo for _, path := range paths { usage, err := disk.Usage(path) if err != nil { - logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{ - logtrace.FieldError: err.Error(), - "path": path, - }) - continue // Skip this path but continue with others + logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{logtrace.FieldError: err.Error(), "path": path}) + continue } - - storageInfos = append(storageInfos, StorageInfo{ - Path: path, - TotalBytes: usage.Total, - UsedBytes: usage.Used, - AvailableBytes: usage.Free, - UsagePercent: usage.UsedPercent, - }) + storageInfos = append(storageInfos, StorageInfo{Path: path, TotalBytes: usage.Total, UsedBytes: usage.Used, AvailableBytes: usage.Free, UsagePercent: usage.UsedPercent}) } - return storageInfos } diff --git a/supernode/status/service.go b/supernode/status/service.go new file mode 100644 index 00000000..1745b0d3 --- /dev/null +++ b/supernode/status/service.go @@ -0,0 +1,192 @@ +package status + +import ( + "context" + "fmt" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/config" +) + +// Version is the supernode version, set by the main application +var Version = "dev" + +// SupernodeStatusService provides centralized status information +type SupernodeStatusService struct { + metrics *MetricsCollector + storagePaths []string + startTime time.Time + p2pService p2p.Client + lumeraClient lumera.Client + config *config.Config +} + +// NewSupernodeStatusService creates a new supernode status service instance +func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { + return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg} +} + +// GetStatus returns the current system status including optional P2P info +func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (*pb.StatusResponse, error) { + fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} + logtrace.Debug(ctx, "status request received", fields) + + resp := &pb.StatusResponse{} + resp.Version = Version + resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) + + cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) + if err != nil { + return resp, err + } + if resp.Resources == nil { + resp.Resources = &pb.StatusResponse_Resources{} + } + if resp.Resources.Cpu == nil { + resp.Resources.Cpu = &pb.StatusResponse_Resources_CPU{} + } + resp.Resources.Cpu.UsagePercent = cpuUsage + cores, err := s.metrics.GetCPUCores(ctx) + if err != nil { + logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) + cores = 0 + } + resp.Resources.Cpu.Cores = cores + memTotal, memUsed, memAvail, memUsedPerc, err := 
s.metrics.CollectMemoryMetrics(ctx) + if err != nil { + return resp, err + } + const bytesToGB = 1024 * 1024 * 1024 + if resp.Resources.Memory == nil { + resp.Resources.Memory = &pb.StatusResponse_Resources_Memory{} + } + resp.Resources.Memory.TotalGb = float64(memTotal) / bytesToGB + resp.Resources.Memory.UsedGb = float64(memUsed) / bytesToGB + resp.Resources.Memory.AvailableGb = float64(memAvail) / bytesToGB + resp.Resources.Memory.UsagePercent = memUsedPerc + if cores > 0 && resp.Resources.Memory.TotalGb > 0 { + resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cores, resp.Resources.Memory.TotalGb) + } + // Storage metrics + for _, si := range s.metrics.CollectStorageMetrics(ctx, s.storagePaths) { + resp.Resources.StorageVolumes = append(resp.Resources.StorageVolumes, &pb.StatusResponse_Resources_Storage{ + Path: si.Path, + TotalBytes: si.TotalBytes, + UsedBytes: si.UsedBytes, + AvailableBytes: si.AvailableBytes, + UsagePercent: si.UsagePercent, + }) + } + + if resp.Network == nil { + resp.Network = &pb.StatusResponse_Network{} + } + resp.Network.PeersCount = 0 + resp.Network.PeerAddresses = []string{} + + // Prepare optional P2P metrics container + pm := &pb.StatusResponse_P2PMetrics{ + DhtMetrics: &pb.StatusResponse_P2PMetrics_DhtMetrics{}, + NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, + Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, + Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, + } + + if includeP2PMetrics && s.p2pService != nil { + p2pStats, err := s.p2pService.Stats(ctx) + if err != nil { + logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) + } else { + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if peersCount, ok := dhtStats["peers_count"].(int); ok { + resp.Network.PeersCount = int32(peersCount) + } + if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { + resp.Network.PeerAddresses = make([]string, 0, len(peers)) + for _, peer := range peers { + resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port)) + } + } else { + resp.Network.PeerAddresses = []string{} + } + } + if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { + pm.Disk.AllMb = du.All + pm.Disk.UsedMb = du.Used + pm.Disk.FreeMb = du.Free + } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { + pm.Disk.AllMb = duPtr.All + pm.Disk.UsedMb = duPtr.Used + pm.Disk.FreeMb = duPtr.Free + } + if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { + for _, b := range bans { + pm.BanList = append(pm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{Id: b.ID, Ip: b.IP, Port: uint32(b.Port), Count: int32(b.Count), CreatedAtUnix: b.CreatedAt.Unix(), AgeSeconds: int64(b.Age.Seconds())}) + } + } + if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { + for k, v := range pool { + pm.ConnPoolMetrics[k] = v + } + } + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if db, ok := dhtStats["database"].(map[string]interface{}); ok { + var sizeMB float64 + if v, ok := db["p2p_db_size"].(float64); ok { + sizeMB = v + } + var recs int64 + switch v := db["p2p_db_records_count"].(type) { + case int: + recs = int64(v) + case int64: + recs = v + case float64: + recs = int64(v) + } + pm.Database.P2PDbSizeMb = sizeMB + pm.Database.P2PDbRecordsCount = recs + } + if nhm, 
ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { + for k, c := range nhm { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { + for k, vi := range nhmI { + if c, ok := vi.(kademlia.HandleCounters); ok { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } + } + } + if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { + for _, sp := range snap.StoreSuccessRecent { + pm.DhtMetrics.StoreSuccessRecent = append(pm.DhtMetrics.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{TimeUnix: sp.Time.Unix(), Requests: int32(sp.Requests), Successful: int32(sp.Successful), SuccessRate: sp.SuccessRate}) + } + for _, bp := range snap.BatchRetrieveRecent { + pm.DhtMetrics.BatchRetrieveRecent = append(pm.DhtMetrics.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{TimeUnix: bp.Time.Unix(), Keys: int32(bp.Keys), Required: int32(bp.Required), FoundLocal: int32(bp.FoundLocal), FoundNetwork: int32(bp.FoundNet), DurationMs: bp.Duration.Milliseconds()}) + } + pm.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips + pm.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements + } + } + } + if includeP2PMetrics { + resp.P2PMetrics = pm + } + + if s.config != nil && s.lumeraClient != nil { + if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { + resp.IpAddress = supernodeInfo.LatestAddress + } + } + return resp, nil +} diff --git a/supernode/node/supernode/gateway/server.go b/supernode/transport/gateway/server.go similarity index 100% rename from supernode/node/supernode/gateway/server.go rename to supernode/transport/gateway/server.go diff --git a/supernode/node/supernode/gateway/swagger.go b/supernode/transport/gateway/swagger.go similarity index 59% rename from supernode/node/supernode/gateway/swagger.go rename to supernode/transport/gateway/swagger.go index d86d0ad9..4bcd3f3d 100644 --- a/supernode/node/supernode/gateway/swagger.go +++ b/supernode/transport/gateway/swagger.go @@ -22,30 +22,30 @@ const swaggerUIHTML = ` *, *:before, *:after { box-sizing: inherit; } body { margin:0; background: #fafafa; } - - -
- - - - - + + +
+ + + + + ` // serveSwaggerJSON serves the OpenAPI specification @@ -58,12 +58,12 @@ func (s *Server) serveSwaggerJSON(w http.ResponseWriter, r *http.Request) { // serveSwaggerUI serves the Swagger UI interface func (s *Server) serveSwaggerUI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html") - + tmpl, err := template.New("swagger").Parse(swaggerUIHTML) if err != nil { http.Error(w, "Failed to load Swagger UI", http.StatusInternalServerError) return } - + tmpl.Execute(w, nil) -} \ No newline at end of file +} diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json new file mode 100644 index 00000000..e6857ae0 --- /dev/null +++ b/supernode/transport/gateway/swagger.json @@ -0,0 +1,46 @@ +{ + "openapi": "3.0.0", + "info": { + "title": "Supernode API", + "version": "1.0.0", + "description": "Supernode status and metrics API" + }, + "paths": { + "/api/v1/status": { + "get": { + "summary": "Get supernode status", + "description": "Returns system, network, and optional P2P metrics", + "parameters": [ + { + "name": "include_p2p_metrics", + "in": "query", + "schema": { "type": "boolean" }, + "required": false, + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true" + } + ], + "responses": { + "200": { + "description": "Status response", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/StatusResponse" } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "StatusResponse": { + "type": "object", + "properties": { + "version": { "type": "string" }, + "uptimeSeconds": { "type": "integer" } + } + } + } + } +} diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go new file mode 100644 index 00000000..139ba8b3 --- /dev/null +++ b/supernode/transport/grpc/cascade/handler.go @@ -0,0 +1,311 @@ +package cascade + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + "os" + "path/filepath" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" + "lukechampine.com/blake3" +) + +type ActionServer struct { pb.UnimplementedCascadeServiceServer; factory cascadeService.CascadeServiceFactory } + + +// NewCascadeActionServer creates a new CascadeActionServer with injected service +func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { return &ActionServer{factory: factory} } + +// calculateOptimalChunkSize returns an optimal chunk size based on file size +// to balance throughput and memory usage +func calculateOptimalChunkSize(fileSize int64) int { + const ( + minChunkSize = 64 * 1024 // 64 KB minimum + maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files + smallFileThreshold = 1024 * 1024 // 1 MB + mediumFileThreshold = 50 * 1024 * 1024 // 50 MB + largeFileThreshold = 500 * 1024 * 1024 // 500 MB + ) + + var chunkSize int + + switch { + case fileSize <= smallFileThreshold: + chunkSize = minChunkSize + case fileSize <= mediumFileThreshold: + chunkSize = 256 * 1024 + case fileSize <= largeFileThreshold: + chunkSize = 1024 * 1024 + default: + chunkSize = maxChunkSize + } + + if chunkSize < minChunkSize { + chunkSize = minChunkSize + } + if chunkSize > maxChunkSize { + chunkSize = maxChunkSize + } + return 
chunkSize +} + +func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { + fields := logtrace.Fields{ + logtrace.FieldMethod: "Register", + logtrace.FieldModule: "CascadeActionServer", + } + + ctx := stream.Context() + logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) + + const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit + + var ( + metadata *pb.Metadata + totalSize int + ) + + hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) + return fmt.Errorf("initializing hasher and temp file: %w", err) + } + defer func(tempFile *os.File) { + err := tempFile.Close() + if err != nil && !errors.Is(err, os.ErrClosed) { + fields[logtrace.FieldError] = err.Error() + logtrace.Warn(ctx, "error closing temp file", fields) + } + }(tempFile) + + for { + req, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "error receiving stream data", fields) + return fmt.Errorf("failed to receive stream data: %w", err) + } + + switch x := req.RequestType.(type) { + case *pb.RegisterRequest_Chunk: + if x.Chunk != nil { + if _, err := hasher.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to hasher", fields) + return fmt.Errorf("hashing error: %w", err) + } + if _, err := tempFile.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to file", fields) + return fmt.Errorf("file write error: %w", err) + } + totalSize += len(x.Chunk.Data) + if totalSize > maxFileSize { + fields[logtrace.FieldError] = "file size exceeds 1GB limit" + fields["total_size"] = totalSize + logtrace.Error(ctx, "upload rejected: file too large", fields) + return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) + } + logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize}) + } + case *pb.RegisterRequest_Metadata: + metadata = x.Metadata + logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) + } + } + + if metadata == nil { + logtrace.Error(ctx, "no metadata received in stream", fields) + return fmt.Errorf("no metadata received") + } + fields[logtrace.FieldTaskID] = metadata.GetTaskId() + fields[logtrace.FieldActionID] = metadata.GetActionId() + logtrace.Debug(ctx, "metadata received from action-sdk", fields) + + if err := tempFile.Sync(); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to sync temp file", fields) + return fmt.Errorf("failed to sync temp file: %w", err) + } + + hash := hasher.Sum(nil) + hashHex := hex.EncodeToString(hash) + fields[logtrace.FieldHashHex] = hashHex + logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) + + targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) + return fmt.Errorf("failed to replace temp dir with task dir: %w", err) + } + + task := server.factory.NewCascadeRegistrationTask() + err = task.Register(ctx, &cascadeService.RegisterRequest{ + TaskID: metadata.TaskId, + ActionID: 
metadata.ActionId, + DataHash: hash, + DataSize: totalSize, + FilePath: targetPath, + }, func(resp *cascadeService.RegisterResponse) error { + grpcResp := &pb.RegisterResponse{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + TxHash: resp.TxHash, + } + if err := stream.Send(grpcResp); err != nil { + logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } + return nil + }) + if err != nil { + logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()}) + return fmt.Errorf("registration failed: %w", err) + } + logtrace.Debug(ctx, "cascade registration completed successfully", fields) + return nil +} + +func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { + ctx := stream.Context() + fields := logtrace.Fields{ + logtrace.FieldMethod: "Download", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldActionID: req.GetActionId(), + } + logtrace.Debug(ctx, "download request received", fields) + + // Prepare to capture decoded file path from task events + var decodedFilePath string + var tmpDir string + + task := server.factory.NewCascadeRegistrationTask() + // Run cascade task Download; stream events back to client + err := task.Download(ctx, &cascadeService.DownloadRequest{ActionID: req.GetActionId(), Signature: req.GetSignature()}, func(resp *cascadeService.DownloadResponse) error { + // Forward event to gRPC client + evt := &pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Event{ + Event: &pb.DownloadEvent{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + }, + }, + } + if sendErr := stream.Send(evt); sendErr != nil { + return sendErr + } + // Capture decode-completed info for streaming + if resp.EventType == cascadeService.SupernodeEventTypeDecodeCompleted { + decodedFilePath = resp.FilePath + tmpDir = resp.DownloadedDir + } + return nil + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "download task failed", fields) + return fmt.Errorf("download task failed: %w", err) + } + + if decodedFilePath == "" { + logtrace.Warn(ctx, "decode completed without file path", fields) + return nil + } + + // Notify client that server is ready to stream the file + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { + return fmt.Errorf("send serve-ready: %w", err) + } + + // Stream file content in chunks + fi, err := os.Stat(decodedFilePath) + if err != nil { + return fmt.Errorf("stat decoded file: %w", err) + } + chunkSize := calculateOptimalChunkSize(fi.Size()) + f, err := os.Open(decodedFilePath) + if err != nil { + return fmt.Errorf("open decoded file: %w", err) + } + defer f.Close() + + buf := make([]byte, chunkSize) + for { + n, rerr := f.Read(buf) + if n > 0 { + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Chunk{Chunk: &pb.DataChunk{Data: append([]byte(nil), buf[:n]...)}}}); err != nil { + return fmt.Errorf("send chunk: %w", err) + } + } + if rerr == io.EOF { + break + } + if rerr != nil { + return fmt.Errorf("read decoded file: %w", rerr) + } + } + + // Cleanup temp directory if provided + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir failed", logtrace.Fields{"tmp_dir": tmpDir, 
logtrace.FieldError: cerr.Error()}) + } + } + + logtrace.Debug(ctx, "download stream completed", fields) + return nil +} + +// initializeHasherAndTempFile prepares a hasher and a temporary file to stream upload data into. +func initializeHasherAndTempFile() (hash.Hash, *os.File, string, error) { + // Create a temp directory for the upload + tmpDir, err := os.MkdirTemp("", "supernode-upload-*") + if err != nil { + return nil, nil, "", fmt.Errorf("create temp dir: %w", err) + } + + // Create a file within the temp directory + filePath := filepath.Join(tmpDir, "data.bin") + f, err := os.Create(filePath) + if err != nil { + return nil, nil, "", fmt.Errorf("create temp file: %w", err) + } + + // Create a BLAKE3 hasher (32 bytes output) + hasher := blake3.New(32, nil) + return hasher, f, filePath, nil +} + +// replaceTempDirWithTaskDir moves the uploaded file into a task-scoped directory +// and returns the new absolute path. +func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (string, error) { + // Ensure data is flushed + _ = tempFile.Sync() + // Close now; deferred close may run later and is safe to ignore + _ = tempFile.Close() + + // Create a stable target directory under OS temp + targetDir := filepath.Join(os.TempDir(), "supernode", "uploads", taskID) + if err := os.MkdirAll(targetDir, 0700); err != nil { + return "", fmt.Errorf("create task dir: %w", err) + } + + newPath := filepath.Join(targetDir, filepath.Base(tempFilePath)) + if err := os.Rename(tempFilePath, newPath); err != nil { + return "", fmt.Errorf("move uploaded file: %w", err) + } + + // Attempt to cleanup the original temp directory + _ = os.RemoveAll(filepath.Dir(tempFilePath)) + return newPath, nil +} diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go new file mode 100644 index 00000000..8e414cca --- /dev/null +++ b/supernode/transport/grpc/status/handler.go @@ -0,0 +1,25 @@ +package server + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" +) + +// SupernodeServer implements the SupernodeService gRPC service +type SupernodeServer struct { + pb.UnimplementedSupernodeServiceServer + statusService *statussvc.SupernodeStatusService +} + + +// NewSupernodeServer creates a new SupernodeServer +func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *SupernodeServer { + return &SupernodeServer{statusService: statusService} +} + +// GetStatus implements SupernodeService.GetStatus +func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { + return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) +} diff --git a/supernode/node/supernode/server/server.go b/supernode/transport/grpc/status/server.go similarity index 63% rename from supernode/node/supernode/server/server.go rename to supernode/transport/grpc/status/server.go index 774be094..b06ae9df 100644 --- a/supernode/node/supernode/server/server.go +++ b/supernode/transport/grpc/status/server.go @@ -7,7 +7,6 @@ import ( "strconv" "strings" - "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" @@ -22,19 +21,17 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" ) -type service interface { - Desc() *grpc.ServiceDesc -} - // Server represents supernode server type Server struct { - config *Config - services []service - name string - kr keyring.Keyring - 
grpcServer *grpcserver.Server - lumeraClient lumera.Client - healthServer *health.Server + identity string + listenAddrs string + port int + services []grpcserver.ServiceDesc + name string + kr keyring.Keyring + grpcServer *grpcserver.Server + lumeraClient lumera.Client + healthServer *health.Server } // Run starts the server @@ -48,12 +45,12 @@ func (server *Server) Run(ctx context.Context) error { // Set up gRPC logging logtrace.SetGRPCLogger() - logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity}) - logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses}) + logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.identity}) + logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.listenAddrs}) group, ctx := errgroup.WithContext(ctx) - addresses := strings.Split(server.config.ListenAddresses, ",") + addresses := strings.Split(server.listenAddrs, ",") if err := server.setupGRPCServer(); err != nil { logtrace.Fatal(ctx, "Failed to setup gRPC server", logtrace.Fields{logtrace.FieldModule: "server", logtrace.FieldError: err.Error()}) } @@ -70,7 +67,7 @@ func (server *Server) Run(ctx context.Context) error { opts.WriteBufferSize = (8 * 1024 * 1024) // 8MB TCP buffer for _, address := range addresses { - addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.port)) address := addr // Create a new variable to avoid closure issues group.Go(func() error { @@ -84,14 +81,14 @@ func (server *Server) Run(ctx context.Context) error { func (server *Server) setupGRPCServer() error { // Create server credentials - serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ - CommonOptions: ltc.CommonOptions{ - Keyring: server.kr, - LocalIdentity: server.config.Identity, - PeerType: securekeyx.Supernode, - Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), - }, - }) + serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ + CommonOptions: ltc.CommonOptions{ + Keyring: server.kr, + LocalIdentity: server.identity, + PeerType: securekeyx.Supernode, + Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), + }, + }) if err != nil { return fmt.Errorf("failed to create server credentials: %w", err) } @@ -107,27 +104,11 @@ func (server *Server) setupGRPCServer() error { server.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) // Register all services - var supernodeServer *SupernodeServer - for _, service := range server.services { - server.grpcServer.RegisterService(service.Desc(), service) - server.healthServer.SetServingStatus(service.Desc().ServiceName, healthpb.HealthCheckResponse_SERVING) - - // Keep reference to SupernodeServer - if ss, ok := service.(*SupernodeServer); ok { - supernodeServer = ss - } - } - - // After all services are registered, update SupernodeServer with the list - if supernodeServer != nil { - // Register all custom services - for _, svc := range server.services { - supernodeServer.RegisterService(svc.Desc().ServiceName, svc.Desc()) + for _, s := range server.services { + server.grpcServer.RegisterService(s.Desc, s.Service) + if s.Desc != nil { + server.healthServer.SetServingStatus(s.Desc.ServiceName, healthpb.HealthCheckResponse_SERVING) } - - // Also 
register the health service - healthDesc := healthpb.Health_ServiceDesc - supernodeServer.RegisterService(healthDesc.ServiceName, &healthDesc) } return nil @@ -146,7 +127,10 @@ func (server *Server) Close() { // Set all services to NOT_SERVING before shutdown server.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) for _, service := range server.services { - serviceName := service.Desc().ServiceName + serviceName := "" + if service.Desc != nil { + serviceName = service.Desc.ServiceName + } server.healthServer.SetServingStatus(serviceName, healthpb.HealthCheckResponse_NOT_SERVING) } } @@ -158,16 +142,17 @@ func (server *Server) Close() { } // New returns a new Server instance. -func New(config *Config, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...service) (*Server, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - return &Server{ - config: config, - services: services, - name: name, - kr: kr, - lumeraClient: lumeraClient, - }, nil +func New(identity, listenAddrs string, port int, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...grpcserver.ServiceDesc) (*Server, error) { + if listenAddrs == "" { + return nil, fmt.Errorf("listen addresses cannot be empty") + } + return &Server{ + identity: identity, + listenAddrs: listenAddrs, + port: port, + services: services, + name: name, + kr: kr, + lumeraClient: lumeraClient, + }, nil } diff --git a/supernode/verifier/interface.go b/supernode/verifier/interface.go new file mode 100644 index 00000000..d068dfce --- /dev/null +++ b/supernode/verifier/interface.go @@ -0,0 +1,31 @@ +package verifier + +import "context" + +// ConfigVerifierService defines verification methods +type ConfigVerifierService interface { + VerifyConfig(ctx context.Context) (*VerificationResult, error) +} + +// ConfigError represents a config validation error or warning +type ConfigError struct { + Field string + Expected string + Actual string + Message string +} + +// VerificationResult holds the outcome of config verification +type VerificationResult struct { + Valid bool + Errors []ConfigError + Warnings []ConfigError +} + +func (r *VerificationResult) IsValid() bool { return r.Valid && len(r.Errors) == 0 } +func (r *VerificationResult) HasWarnings() bool { return len(r.Warnings) > 0 } +func (r *VerificationResult) Summary() string { + if !r.IsValid() { return "invalid: check errors" } + if r.HasWarnings() { return "valid with warnings" } + return "valid" +} diff --git a/supernode/verifier/verifier.go b/supernode/verifier/verifier.go new file mode 100644 index 00000000..91d35a41 --- /dev/null +++ b/supernode/verifier/verifier.go @@ -0,0 +1,76 @@ +package verifier + +import ( + "context" + "fmt" + "net" + + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ConfigVerifier struct { config *config.Config; lumeraClient lumera.Client; keyring keyring.Keyring } + +func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { return &ConfigVerifier{config: cfg, lumeraClient: client, keyring: kr} } + +func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { + result := &VerificationResult{ Valid: true, 
Errors: []ConfigError{}, Warnings: []ConfigError{} } + logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{"identity": cv.config.SupernodeConfig.Identity, "key_name": cv.config.SupernodeConfig.KeyName, "p2p_port": cv.config.P2PConfig.Port}) + if err := cv.checkKeyExists(result); err != nil { return result, err } + if err := cv.checkIdentityMatches(result); err != nil { return result, err } + if !result.IsValid() { return result, nil } + supernodeInfo, err := cv.checkSupernodeExists(ctx, result); if err != nil { return result, err } + if supernodeInfo == nil { return result, nil } + cv.checkSupernodeState(result, supernodeInfo) + cv.checkPortsAvailable(result) + logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{"valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings)}) + return result, nil +} + +func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { + _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "key_name", Actual: cv.config.SupernodeConfig.KeyName, Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName)}) } + return nil +} + +func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { + keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName); if err != nil { return nil } + pubKey, err := keyInfo.GetPubKey(); if err != nil { return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) } + addr := sdk.AccAddress(pubKey.Address()) + if addr.String() != cv.config.SupernodeConfig.Identity { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "identity", Expected: addr.String(), Actual: cv.config.SupernodeConfig.Identity, Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity)}) } + return nil +} + +func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { + sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) + if err != nil { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "registration", Actual: "not_registered", Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity)}); return nil, nil } + return sn, nil +} + +func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) + chainPort := supernodeInfo.P2PPort + if chainPort != "" && chainPort != configPort { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Expected: chainPort, Actual: configPort, Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort)}) } +} + +func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "state", Expected: "SUPERNODE_STATE_ACTIVE", Actual: supernodeInfo.CurrentState, Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState)}) 
} +} + +func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "supernode_port", Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port)}) } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port)}) } + gatewayPort := int(cv.config.SupernodeConfig.GatewayPort); if gatewayPort == 0 { gatewayPort = 8002 } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "gateway_port", Actual: fmt.Sprintf("%d", gatewayPort), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", gatewayPort)}) } +} + +func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { + ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { return false } + _ = ln.Close() + return true +} diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 2db7ad09..440fb2a3 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -244,7 +244,7 @@ func TestCascadeE2E(t *testing.T) { t.Log("Step 4: Creating test file for RaptorQ encoding") // Use test file from tests/system directory - testFileName := "test.txt" + testFileName := "sample9.zip" testFileFullpath := filepath.Join(testFileName) // Verify test file exists From 2647c37a904cff403ca999b22836c8478d1ee83d Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 15:41:03 +0500 Subject: [PATCH 05/36] FileName --- tests/system/e2e_cascade_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 440fb2a3..2db7ad09 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -244,7 +244,7 @@ func TestCascadeE2E(t *testing.T) { t.Log("Step 4: Creating test file for RaptorQ encoding") // Use test file from tests/system directory - testFileName := "sample9.zip" + testFileName := "test.txt" testFileFullpath := filepath.Join(testFileName) // Verify test file exists From 9e1360eea3a7471c9738eeaf9abac6c4b2fa8537 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 16:02:28 +0500 Subject: [PATCH 06/36] go fmt --- p2p/kademlia/dht.go | 131 ++-- p2p/kademlia/network.go | 199 +++--- p2p/kademlia/rq_symbols.go | 38 +- p2p/p2p.go | 30 +- pkg/cascadekit/doc.go | 1 - pkg/cascadekit/index.go | 57 +- pkg/cascadekit/index_parse.go | 25 +- pkg/cascadekit/metadata.go | 17 +- pkg/cascadekit/metadata_helpers.go | 29 +- pkg/cascadekit/parsers.go | 59 +- pkg/cascadekit/rqid.go | 84 +-- pkg/cascadekit/signatures.go | 85 +-- pkg/cascadekit/verify.go | 27 +- pkg/logtrace/datadog.go | 16 +- pkg/lumera/modules/action_msg/helpers.go | 90 +-- pkg/lumera/util/coin.go | 49 +- sdk/adapters/lumera/adapter.go | 32 +- sdk/adapters/supernodeservice/types.go | 14 +- 
sdk/event/keys.go | 2 +- sdk/event/types.go | 64 +- sdk/net/client.go | 12 +- sdk/net/impl.go | 42 +- sdk/task/timeouts.go | 1 - supernode/cascade/adaptors/lumera.go | 37 +- supernode/cascade/adaptors/p2p.go | 261 +++++--- supernode/cascade/adaptors/rq.go | 39 +- supernode/cascade/config.go | 8 +- supernode/cascade/events.go | 38 +- supernode/cascade/helper.go | 48 +- supernode/cascade/interfaces.go | 10 +- supernode/cascade/register.go | 230 +++---- supernode/cascade/service.go | 41 +- supernode/cmd/config_update.go | 16 +- supernode/cmd/service.go | 45 +- supernode/config/defaults.go | 19 +- supernode/transport/grpc/cascade/handler.go | 572 +++++++++--------- supernode/transport/grpc/status/handler.go | 15 +- supernode/transport/grpc/status/server.go | 66 +- supernode/verifier/interface.go | 28 +- supernode/verifier/verifier.go | 143 +++-- tests/integration/p2p/p2p_integration_test.go | 4 +- 41 files changed, 1447 insertions(+), 1277 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 9d029479..62aa2768 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -124,7 +124,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 { // Options contains configuration options for the queries node type Options struct { - ID []byte + ID []byte // The queries IPv4 or IPv6 address IP string @@ -141,7 +141,6 @@ type Options struct { // Keyring for credentials Keyring keyring.Keyring - } // NewDHT returns a new DHT node @@ -471,7 +470,7 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) { dhtStats["peers_count"] = len(s.ht.nodes()) dhtStats["peers"] = s.ht.nodes() dhtStats["network"] = s.network.HandleMetricsSnapshot() - // Removed: recent per-request snapshots (logs provide visibility) + // Removed: recent per-request snapshots (logs provide visibility) dhtStats["database"] = dbStats return dhtStats, nil @@ -682,7 +681,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result } func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { - logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) + logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 @@ -754,23 +753,23 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if err != nil { return nil, fmt.Errorf("fetch and add local keys: %v", err) } - // Found locally count is logged via summary below; no external metrics + // Found locally count is logged via summary below; no external metrics if foundLocalCount >= required { return result, nil } - batchSize := batchRetrieveSize - var networkFound int32 - totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) - parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) + batchSize := batchRetrieveSize + var networkFound int32 + totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) + parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) - var wg sync.WaitGroup - gctx, cancel := context.WithCancel(ctx) - defer cancel() + semaphore := make(chan struct{}, parallelBatches) + var wg sync.WaitGroup + gctx, cancel := context.WithCancel(ctx) + defer 
cancel() - // Measure only the network retrieval phase (after local scan) - netStart := time.Now() + // Measure only the network retrieval phase (after local scan) + netStart := time.Now() for start := 0; start < len(keys); start += batchSize { end := start + batchSize @@ -802,15 +801,17 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() - netFound := int(atomic.LoadInt32(&networkFound)) -{ - f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch retrieve summary", f) -} - // Record batch retrieve stats for internal DHT snapshot window (network phase only) - s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) - // No per-task metrics collector updates + netFound := int(atomic.LoadInt32(&networkFound)) + { + f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch retrieve summary", f) + } + // Record batch retrieve stats for internal DHT snapshot window (network phase only) + s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) + // No per-task metrics collector updates return result, nil } @@ -942,7 +943,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, defer func() { <-semaphore }() } - indices := fetchMap[nodeID] + indices := fetchMap[nodeID] requestKeys := make(map[string]KeyValWithClosest) for _, idx := range indices { if idx < len(hexKeys) { @@ -966,9 +967,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, firstErr = err } mu.Unlock() - // per-node metrics removed; logs retained - return - } + // per-node metrics removed; logs retained + return + } returned := 0 for k, v := range decompressedData { @@ -988,9 +989,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } } - // per-node metrics removed; logs retained - }(node, nodeID) - } + // per-node metrics removed; logs retained + }(node, nodeID) + } wg.Wait() @@ -1038,20 +1039,24 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) -{ - f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch get send", f) -} + { + f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get send", f) + } response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } -{ - f := logtrace.Fields{"node": node.String(), 
logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch get ok", f) -} + { + f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get ok", f) + } resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1659,11 +1664,13 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) -{ - f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store start", f) -} + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store start", f) + } for i := 0; i < len(values); i++ { target, _ := utils.Blake3Hash(values[i]) hashes[i] = target @@ -1689,11 +1696,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ - var nodeAddr string + var nodeAddr string if response.Receiver != nil { - nodeAddr = response.Receiver.String() + nodeAddr = response.Receiver.String() } else if response.Message != nil && response.Message.Sender != nil { - nodeAddr = response.Message.Sender.String() + nodeAddr = response.Message.Sender.String() } errMsg := "" @@ -1724,7 +1731,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } } - // per-node store metrics removed; logs retained + // per-node store metrics removed; logs retained } @@ -1797,11 +1804,13 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - { - f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store RPC send", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC send", f) + } // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. 
if len(toStore) == 0 { @@ -1827,11 +1836,13 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ return } - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store RPC ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC ok", f) + } responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index e4ab76e5..ef542ee5 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -68,7 +68,6 @@ type Network struct { sem *semaphore.Weighted metrics sync.Map - } // NewNetwork returns a network service @@ -406,15 +405,15 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { }) return } - // stitch correlation + origin into context for downstream handler logs - if request != nil { - if s := strings.TrimSpace(request.CorrelationID); s != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, s) - } - if o := strings.TrimSpace(request.Origin); o != "" { - ctx = logtrace.CtxWithOrigin(ctx, o) - } - } + // stitch correlation + origin into context for downstream handler logs + if request != nil { + if s := strings.TrimSpace(request.CorrelationID); s != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, s) + } + if o := strings.TrimSpace(request.Origin); o != "" { + ctx = logtrace.CtxWithOrigin(ctx, o) + } + } reqID := uuid.New().String() mt := request.MessageType @@ -597,17 +596,17 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes idStr := string(request.Receiver.ID) remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port) // Log raw RPC start (reduce noise: Info only for high-signal messages) - startFields := logtrace.Fields{ - logtrace.FieldModule: "p2p", - "remote": remoteAddr, - "message": msgName(request.MessageType), - "timeout_ms": int64(timeout / time.Millisecond), - } - // Tag role/origin for filtering - startFields[logtrace.FieldRole] = "client" - if o := logtrace.OriginFromContext(ctx); o != "" { - startFields[logtrace.FieldOrigin] = o - } + startFields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message": msgName(request.MessageType), + "timeout_ms": int64(timeout / time.Millisecond), + } + // Tag role/origin for filtering + startFields[logtrace.FieldRole] = "client" + if o := logtrace.OriginFromContext(ctx); o != "" { + startFields[logtrace.FieldOrigin] = o + } if isHighSignalMsg(request.MessageType) { logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) } else { @@ -615,14 +614,14 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes } // Attach correlation id only for high‑signal messages (store/retrieve batches) - if isHighSignalMsg(request.MessageType) { - if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { - request.CorrelationID = cid - } - if o := logtrace.OriginFromContext(ctx); o != "" { - 
request.Origin = o - } - } + if isHighSignalMsg(request.MessageType) { + if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { + request.CorrelationID = cid + } + if o := logtrace.OriginFromContext(ctx); o != "" { + request.Origin = o + } + } // try get from pool s.connPoolMtx.Lock() @@ -750,13 +749,15 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd return nil, errors.Errorf("conn read: %w", e) } // Single-line completion for successful outbound RPC - if isHighSignalMsg(msgType) { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return r, nil } } @@ -842,13 +843,15 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("conn read: %w", err) } - if isHighSignalMsg(msgType) { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } 
return resp, nil } @@ -950,17 +953,17 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r } func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) { - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { - res = response - } - }() - - request, ok := message.Data.(*BatchGetValuesRequest) - if !ok { - err := errors.New("invalid BatchGetValuesRequest") - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { + res = response + } + }() + + request, ok := message.Data.(*BatchGetValuesRequest) + if !ok { + err := errors.New("invalid BatchGetValuesRequest") + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", @@ -976,17 +979,19 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, i++ } - values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) - if err != nil { - err = errors.Errorf("batch find values: %w", err) - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) + if err != nil { + err = errors.Errorf("batch find values: %w", err) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch get values ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch get values ok", f) + } for i, key := range keys { val := KeyValWithClosest{ @@ -1005,9 +1010,9 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, } // new a response message - resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { @@ -1178,32 +1183,34 @@ func findTopHeaviestKeys(dataMap map[string][]byte, size int) (int, []string) { } func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (res []byte, err error) { - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { - res = response - } - }() - - request, ok := message.Data.(*BatchStoreDataRequest) - if !ok { - err := errors.New("invalid BatchStoreDataRequest") - return s.generateResponseMessage(ctx, BatchStoreData, 
message.Sender, ResultFailed, err.Error()) - } + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { + res = response + } + }() + + request, ok := message.Data.(*BatchStoreDataRequest) + if !ok { + err := errors.New("invalid BatchStoreDataRequest") + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch store recv", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store recv", f) + } // add the sender to queries hash table s.dht.addNode(ctx, message.Sender) - if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { - err = errors.Errorf("batch store the data: %w", err) - return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) - } + if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { + err = errors.Errorf("batch store the data: %w", err) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } response := &StoreDataResponse{ Status: ResponseStatus{ @@ -1211,16 +1218,18 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch store ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store ok", f) + } // new a response message - resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (res []byte, err error) { diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index 85367dec..7aa2c578 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -53,9 +53,9 @@ func (s *DHT) storeSymbols(ctx context.Context) error { } start := time.Now() logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) - if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { - logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) - } + if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); 
err != nil { + logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + } // Post-count remaining symbols remCount := -1 if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { @@ -86,17 +86,17 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) - // Batch-flush at loadSymbolsBatchSize - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { - return err - } - start = end - } + // Batch-flush at loadSymbolsBatchSize + for start := 0; start < len(keys); { + end := start + loadSymbolsBatchSize + if end > len(keys) { + end = len(keys) + } + if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { + return err + } + start = end + } // Mark this directory as completed in rqstore if err := s.rqstore.SetIsCompleted(txid); err != nil { @@ -110,7 +110,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // --------------------------------------------------------------------- func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []string) error { // Per-batch visibility for background worker - logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) + logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) @@ -118,11 +118,11 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []st return fmt.Errorf("load symbols: %w", err) } - if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } + if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { + return fmt.Errorf("p2p store batch: %w", err) + } - logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) + logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) diff --git a/p2p/p2p.go b/p2p/p2p.go index bb38ac0c..f9a5f74e 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -40,14 +40,14 @@ type P2P interface { // p2p structure to implements interface type p2p struct { - store kademlia.Store // the store for kademlia network - metaStore kademlia.MetaStore - dht *kademlia.DHT // the kademlia network - config *Config // the service configuration - running bool // if the kademlia network is ready - lumeraClient lumera.Client - keyring keyring.Keyring // Add the keyring field - rqstore rqstore.Store + store kademlia.Store // the store for kademlia network + metaStore kademlia.MetaStore + dht *kademlia.DHT // the kademlia network + config *Config // the service configuration + running bool // if the kademlia network is ready + lumeraClient lumera.Client + keyring keyring.Keyring // Add the keyring field + rqstore rqstore.Store } // Run the kademlia network @@ -263,13 +263,13 @@ func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr key } return 
&p2p{ - store: store, - metaStore: meta, - config: config, - lumeraClient: lumeraClient, - keyring: kr, // Store the keyring - rqstore: rqstore, - }, nil + store: store, + metaStore: meta, + config: config, + lumeraClient: lumeraClient, + keyring: kr, // Store the keyring + rqstore: rqstore, + }, nil } // LocalStore store data into the kademlia network diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go index ab8ce081..5fa61f7b 100644 --- a/pkg/cascadekit/doc.go +++ b/pkg/cascadekit/doc.go @@ -14,4 +14,3 @@ // - No logging; keep functions small and deterministic // - No orchestration helpers; this package exposes building blocks only package cascadekit - diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go index bd9c040d..e0cb3dce 100644 --- a/pkg/cascadekit/index.go +++ b/pkg/cascadekit/index.go @@ -1,11 +1,11 @@ package cascadekit import ( - "encoding/base64" - "encoding/json" - "strings" + "encoding/base64" + "encoding/json" + "strings" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" ) // SeparatorByte is the '.' separator used when composing payloads with counters. @@ -14,50 +14,49 @@ const SeparatorByte byte = 46 // IndexFile represents the structure of the index file referenced on-chain. // The JSON fields must match the existing format. type IndexFile struct { - Version int `json:"version,omitempty"` - LayoutIDs []string `json:"layout_ids"` - LayoutSignature string `json:"layout_signature"` + Version int `json:"version,omitempty"` + LayoutIDs []string `json:"layout_ids"` + LayoutSignature string `json:"layout_signature"` } // BuildIndex creates an IndexFile from layout IDs and the layout signature. func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { - return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} + return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} } // EncodeIndexB64 marshals an index file and returns both the raw JSON and base64. func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) { - raw, err = json.Marshal(idx) - if err != nil { - return "", nil, errors.Errorf("marshal index file: %w", err) - } - return base64.StdEncoding.EncodeToString(raw), raw, nil + raw, err = json.Marshal(idx) + if err != nil { + return "", nil, errors.Errorf("marshal index file: %w", err) + } + return base64.StdEncoding.EncodeToString(raw), raw, nil } // DecodeIndexB64 decodes base64(JSON(IndexFile)). 
func DecodeIndexB64(data string) (IndexFile, error) { - var indexFile IndexFile - decodedData, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return indexFile, errors.Errorf("failed to decode index file: %w", err) - } - if err := json.Unmarshal(decodedData, &indexFile); err != nil { - return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) - } - return indexFile, nil + var indexFile IndexFile + decodedData, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return indexFile, errors.Errorf("failed to decode index file: %w", err) + } + if err := json.Unmarshal(decodedData, &indexFile); err != nil { + return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) + } + return indexFile, nil } // ExtractIndexAndCreatorSig splits a signatures string formatted as: // Base64(index_json).Base64(creator_signature) func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) { - parts := strings.Split(signatures, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") - } - return parts[0], parts[1], nil + parts := strings.Split(signatures, ".") + if len(parts) < 2 { + return "", "", errors.New("invalid signatures format") + } + return parts[0], parts[1], nil } // MakeSignatureFormat composes the final signatures string. func MakeSignatureFormat(indexB64, creatorSigB64 string) string { - return indexB64 + "." + creatorSigB64 + return indexB64 + "." + creatorSigB64 } - diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go index 9629398e..0fbf3dca 100644 --- a/pkg/cascadekit/index_parse.go +++ b/pkg/cascadekit/index_parse.go @@ -1,23 +1,22 @@ package cascadekit import ( - "bytes" + "bytes" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // ParseCompressedIndexFile parses a compressed index file into an IndexFile. // The compressed format is: base64(IndexJSON).creator_signature.counter func ParseCompressedIndexFile(data []byte) (IndexFile, error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return IndexFile{}, errors.Errorf("decompress index file: %w", err) - } - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") - } - return DecodeIndexB64(string(parts[0])) + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return IndexFile{}, errors.Errorf("decompress index file: %w", err) + } + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) < 2 { + return IndexFile{}, errors.New("invalid index file format") + } + return DecodeIndexB64(string(parts[0])) } - diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go index 79969280..534ef793 100644 --- a/pkg/cascadekit/metadata.go +++ b/pkg/cascadekit/metadata.go @@ -1,18 +1,17 @@ package cascadekit import ( - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" ) // NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. // The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata { - return actiontypes.CascadeMetadata{ - DataHash: dataHashB64, - FileName: fileName, - RqIdsIc: rqIdsIc, - Signatures: signatures, - Public: public, - } + return actiontypes.CascadeMetadata{ + DataHash: dataHashB64, + FileName: fileName, + RqIdsIc: rqIdsIc, + Signatures: signatures, + Public: public, + } } - diff --git a/pkg/cascadekit/metadata_helpers.go b/pkg/cascadekit/metadata_helpers.go index c98aaa83..94a20442 100644 --- a/pkg/cascadekit/metadata_helpers.go +++ b/pkg/cascadekit/metadata_helpers.go @@ -1,27 +1,26 @@ package cascadekit import ( - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/golang/protobuf/proto" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/golang/protobuf/proto" ) // UnmarshalCascadeMetadata decodes action metadata bytes into CascadeMetadata. func UnmarshalCascadeMetadata(raw []byte) (actiontypes.CascadeMetadata, error) { - var meta actiontypes.CascadeMetadata - if err := proto.Unmarshal(raw, &meta); err != nil { - return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) - } - return meta, nil + var meta actiontypes.CascadeMetadata + if err := proto.Unmarshal(raw, &meta); err != nil { + return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) + } + return meta, nil } // VerifyB64DataHash compares a raw hash with an expected base64 string. func VerifyB64DataHash(raw []byte, expectedB64 string) error { - b64 := utils.B64Encode(raw) - if string(b64) != expectedB64 { - return errors.New("data hash doesn't match") - } - return nil + b64 := utils.B64Encode(raw) + if string(b64) != expectedB64 { + return errors.New("data hash doesn't match") + } + return nil } - diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go index ed8e270b..be950e4f 100644 --- a/pkg/cascadekit/parsers.go +++ b/pkg/cascadekit/parsers.go @@ -1,40 +1,39 @@ package cascadekit import ( - "bytes" + "bytes" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - json "github.com/json-iterator/go" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + json "github.com/json-iterator/go" ) // ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. 
// File format: base64(JSON(layout)).signature.counter (all parts separated by '.') func ParseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) - } - - // base64EncodeMetadata.Signature.Counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) != 3 { - return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") - } - - layoutJson, err := utils.B64Decode(parts[0]) - if err != nil { - return layout, "", "", errors.Errorf("base64 decode failed: %w", err) - } - - if err := json.Unmarshal(layoutJson, &layout); err != nil { - return layout, "", "", errors.Errorf("unmarshal layout: %w", err) - } - - signature = string(parts[1]) - counter = string(parts[2]) - - return layout, signature, counter, nil + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) + } + + // base64EncodeMetadata.Signature.Counter + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) != 3 { + return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") + } + + layoutJson, err := utils.B64Decode(parts[0]) + if err != nil { + return layout, "", "", errors.Errorf("base64 decode failed: %w", err) + } + + if err := json.Unmarshal(layoutJson, &layout); err != nil { + return layout, "", "", errors.Errorf("unmarshal layout: %w", err) + } + + signature = string(parts[1]) + counter = string(parts[2]) + + return layout, signature, counter, nil } - diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go index 97066b11..3a05eb94 100644 --- a/pkg/cascadekit/rqid.go +++ b/pkg/cascadekit/rqid.go @@ -1,62 +1,62 @@ package cascadekit import ( - "context" - "encoding/json" + "context" + "encoding/json" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // GenRQIdentifiersFilesResponse groups the generated files and their IDs. type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files generated from the Metadata file - RedundantMetadataFiles [][]byte + // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) + RQIDs []string + // RedundantMetadataFiles is a list of redundant files generated from the Metadata file + RedundantMetadataFiles [][]byte } // GenerateLayoutFiles builds redundant metadata files from layout and signature. 
// The content is: base64(JSON(layout)).layout_signature func GenerateLayoutFiles(ctx context.Context, layout codec.Layout, layoutSigB64 string, ic uint32, max uint32) (GenRQIdentifiersFilesResponse, error) { - // Validate single-block to match package invariant - if len(layout.Blocks) != 1 { - return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") - } - - metadataFile, err := jsonMarshal(layout) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) - } - b64Encoded := utils.B64Encode(metadataFile) - - // Compose: base64(JSON(layout)).layout_signature - enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) - enc = append(enc, b64Encoded...) - enc = append(enc, SeparatorByte) - enc = append(enc, []byte(layoutSigB64)...) - - ids, files, err := getIDFiles(enc, ic, max) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: files, - RQIDs: ids, - }, nil + // Validate single-block to match package invariant + if len(layout.Blocks) != 1 { + return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") + } + + metadataFile, err := jsonMarshal(layout) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) + } + b64Encoded := utils.B64Encode(metadataFile) + + // Compose: base64(JSON(layout)).layout_signature + enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) + enc = append(enc, b64Encoded...) + enc = append(enc, SeparatorByte) + enc = append(enc, []byte(layoutSigB64)...) + + ids, files, err := getIDFiles(enc, ic, max) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) + } + + return GenRQIdentifiersFilesResponse{ + RedundantMetadataFiles: files, + RQIDs: ids, + }, nil } // GenerateIndexFiles generates index files and their IDs from the full signatures format. func GenerateIndexFiles(ctx context.Context, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Use the full signatures format that matches what was sent during RequestAction - // The chain expects this exact format for ID generation - indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) - if err != nil { - return nil, nil, errors.Errorf("get index ID files: %w", err) - } - return indexIDs, indexFiles, nil + // Use the full signatures format that matches what was sent during RequestAction + // The chain expects this exact format for ID generation + indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) + if err != nil { + return nil, nil, errors.Errorf("get index ID files: %w", err) + } + return indexIDs, indexFiles, nil } // jsonMarshal marshals a value to JSON. diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go index 6653c5bc..0c71e492 100644 --- a/pkg/cascadekit/signatures.go +++ b/pkg/cascadekit/signatures.go @@ -1,12 +1,12 @@ package cascadekit import ( - "encoding/base64" - "encoding/json" - "fmt" + "encoding/base64" + "encoding/json" + "fmt" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" ) // Signer is a function that signs the provided message and returns the raw signature bytes. 
@@ -15,52 +15,53 @@ type Signer func(msg []byte) ([]byte, error) // SignLayoutB64 validates single-block layout, marshals to JSON, base64-encodes it, // and signs the base64 payload, returning both the layout base64 and signature base64. func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layoutSigB64 string, err error) { - if len(layout.Blocks) != 1 { - return "", "", errors.New("layout must contain exactly one block") - } + if len(layout.Blocks) != 1 { + return "", "", errors.New("layout must contain exactly one block") + } - me, err := json.Marshal(layout) - if err != nil { - return "", "", errors.Errorf("marshal layout: %w", err) - } - layoutB64 = base64.StdEncoding.EncodeToString(me) + me, err := json.Marshal(layout) + if err != nil { + return "", "", errors.Errorf("marshal layout: %w", err) + } + layoutB64 = base64.StdEncoding.EncodeToString(me) - sig, err := signer([]byte(layoutB64)) - if err != nil { - return "", "", errors.Errorf("sign layout: %w", err) - } - layoutSigB64 = base64.StdEncoding.EncodeToString(sig) - return layoutB64, layoutSigB64, nil + sig, err := signer([]byte(layoutB64)) + if err != nil { + return "", "", errors.Errorf("sign layout: %w", err) + } + layoutSigB64 = base64.StdEncoding.EncodeToString(sig) + return layoutB64, layoutSigB64, nil } // CreateSignatures reproduces the cascade signature format and index IDs: -// Base64(index_json).Base64(creator_signature) +// +// Base64(index_json).Base64(creator_signature) +// // It validates the layout has exactly one block. func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (signatures string, indexIDs []string, err error) { - layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) - if err != nil { - return "", nil, err - } + layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) + if err != nil { + return "", nil, err + } - // Generate layout IDs (not returned; used to populate the index file) - layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) + // Generate layout IDs (not returned; used to populate the index file) + layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) - // Build and sign the index file - idx := BuildIndex(layoutIDs, layoutSigB64) - indexB64, _, err := EncodeIndexB64(idx) - if err != nil { - return "", nil, err - } + // Build and sign the index file + idx := BuildIndex(layoutIDs, layoutSigB64) + indexB64, _, err := EncodeIndexB64(idx) + if err != nil { + return "", nil, err + } - creatorSig, err := signer([]byte(indexB64)) - if err != nil { - return "", nil, errors.Errorf("sign index: %w", err) - } - creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) - signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) + creatorSig, err := signer([]byte(indexB64)) + if err != nil { + return "", nil, errors.Errorf("sign index: %w", err) + } + creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) + signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - // Generate the index IDs (these are the RQIDs sent to chain) - indexIDs = GenerateIndexIDs(signatures, ic, max) - return signatures, indexIDs, nil + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs = GenerateIndexIDs(signatures, ic, max) + return signatures, indexIDs, nil } - diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go index 4e7217be..5c4ff8a4 100644 --- a/pkg/cascadekit/verify.go +++ b/pkg/cascadekit/verify.go @@ -1,23 +1,22 @@ package cascadekit import ( - "github.com/LumeraProtocol/supernode/v2/pkg/codec" 
- "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // VerifySingleBlockIDs enforces single-block layouts and verifies that the // symbols and block hash of ticket and local layouts match for block 0. func VerifySingleBlockIDs(ticket, local codec.Layout) error { - if len(ticket.Blocks) != 1 || len(local.Blocks) != 1 { - return errors.New("layout must contain exactly one block") - } - if err := utils.EqualStrList(ticket.Blocks[0].Symbols, local.Blocks[0].Symbols); err != nil { - return errors.Errorf("symbol identifiers don't match: %w", err) - } - if ticket.Blocks[0].Hash != local.Blocks[0].Hash { - return errors.New("block hashes don't match") - } - return nil + if len(ticket.Blocks) != 1 || len(local.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + if err := utils.EqualStrList(ticket.Blocks[0].Symbols, local.Blocks[0].Symbols); err != nil { + return errors.Errorf("symbol identifiers don't match: %w", err) + } + if ticket.Blocks[0].Hash != local.Blocks[0].Hash { + return errors.New("block hashes don't match") + } + return nil } - diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go index 6fb0ba86..5c739d12 100644 --- a/pkg/logtrace/datadog.go +++ b/pkg/logtrace/datadog.go @@ -116,14 +116,14 @@ func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fiel for k, v := range fields { attrs[k] = v } - // Attach correlation ID if present - if cid := extractCorrelationID(ctx); cid != "unknown" { - attrs["correlation_id"] = cid - } - // Attach origin/phase if present (first_pass | worker | download) - if o := OriginFromContext(ctx); o != "" { - attrs["origin"] = o - } + // Attach correlation ID if present + if cid := extractCorrelationID(ctx); cid != "unknown" { + attrs["correlation_id"] = cid + } + // Attach origin/phase if present (first_pass | worker | download) + if o := OriginFromContext(ctx); o != "" { + attrs["origin"] = o + } entry := map[string]any{ "message": msg, diff --git a/pkg/lumera/modules/action_msg/helpers.go b/pkg/lumera/modules/action_msg/helpers.go index 6de5fb9f..b3b44193 100644 --- a/pkg/lumera/modules/action_msg/helpers.go +++ b/pkg/lumera/modules/action_msg/helpers.go @@ -1,58 +1,58 @@ package action_msg import ( - "fmt" - "strconv" - "time" + "fmt" + "strconv" + "time" - actionapi "github.com/LumeraProtocol/lumera/api/lumera/action" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" - "google.golang.org/protobuf/encoding/protojson" + actionapi "github.com/LumeraProtocol/lumera/api/lumera/action" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" + "google.golang.org/protobuf/encoding/protojson" ) func validateRequestActionParams(actionType, metadata, price, expirationTime string) error { - if actionType == "" { - return fmt.Errorf("action type cannot be empty") - } - if metadata == "" { - return fmt.Errorf("metadata cannot be empty") - } - if price == "" { - return fmt.Errorf("price cannot be empty") - } - // Validate price: must be integer coin in ulume (e.g., "1000ulume") - if err := util.ValidateUlumeIntCoin(price); err != nil { - return fmt.Errorf("invalid price: %w", err) - } - if expirationTime == "" { - return fmt.Errorf("expiration time 
cannot be empty") - } - // Validate expiration is a future unix timestamp - exp, err := strconv.ParseInt(expirationTime, 10, 64) - if err != nil { - return fmt.Errorf("invalid expirationTime: %w", err) - } - // Allow small clock skew; require strictly in the future - if exp <= time.Now().Add(30*time.Second).Unix() { - return fmt.Errorf("expiration time must be in the future") - } - return nil + if actionType == "" { + return fmt.Errorf("action type cannot be empty") + } + if metadata == "" { + return fmt.Errorf("metadata cannot be empty") + } + if price == "" { + return fmt.Errorf("price cannot be empty") + } + // Validate price: must be integer coin in ulume (e.g., "1000ulume") + if err := util.ValidateUlumeIntCoin(price); err != nil { + return fmt.Errorf("invalid price: %w", err) + } + if expirationTime == "" { + return fmt.Errorf("expiration time cannot be empty") + } + // Validate expiration is a future unix timestamp + exp, err := strconv.ParseInt(expirationTime, 10, 64) + if err != nil { + return fmt.Errorf("invalid expirationTime: %w", err) + } + // Allow small clock skew; require strictly in the future + if exp <= time.Now().Add(30*time.Second).Unix() { + return fmt.Errorf("expiration time must be in the future") + } + return nil } func validateFinalizeActionParams(actionId string, rqIdsIds []string) error { - if actionId == "" { - return fmt.Errorf("action ID cannot be empty") - } - if len(rqIdsIds) == 0 { - return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") - } - for i, s := range rqIdsIds { - if s == "" { - return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) - } - } - return nil + if actionId == "" { + return fmt.Errorf("action ID cannot be empty") + } + if len(rqIdsIds) == 0 { + return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") + } + for i, s := range rqIdsIds { + if s == "" { + return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) + } + } + return nil } func createRequestActionMessage(creator, actionType, metadata, price, expirationTime string) *actiontypes.MsgRequestAction { diff --git a/pkg/lumera/util/coin.go b/pkg/lumera/util/coin.go index 561f5560..6632c1fe 100644 --- a/pkg/lumera/util/coin.go +++ b/pkg/lumera/util/coin.go @@ -1,35 +1,34 @@ package util import ( - "fmt" - "strings" + "fmt" + "strings" ) // ValidateUlumeIntCoin checks that the input is a positive integer amount // with the 'ulume' denom, e.g., "1000ulume". It keeps validation simple // without pulling in SDK dependencies. 
func ValidateUlumeIntCoin(s string) error { - const denom = "ulume" - if !strings.HasSuffix(s, denom) { - return fmt.Errorf("denom must be '%s'", denom) - } - num := s[:len(s)-len(denom)] - if num == "" { - return fmt.Errorf("amount is required before denom") - } - // must be all digits, no leading +/-, no decimals - var val uint64 - for i := 0; i < len(num); i++ { - c := num[i] - if c < '0' || c > '9' { - return fmt.Errorf("amount must be an integer number") - } - // simple overflow-safe accumulation for uint64 - val = val*10 + uint64(c-'0') - } - if val == 0 { - return fmt.Errorf("amount must be greater than zero") - } - return nil + const denom = "ulume" + if !strings.HasSuffix(s, denom) { + return fmt.Errorf("denom must be '%s'", denom) + } + num := s[:len(s)-len(denom)] + if num == "" { + return fmt.Errorf("amount is required before denom") + } + // must be all digits, no leading +/-, no decimals + var val uint64 + for i := 0; i < len(num); i++ { + c := num[i] + if c < '0' || c > '9' { + return fmt.Errorf("amount must be an integer number") + } + // simple overflow-safe accumulation for uint64 + val = val*10 + uint64(c-'0') + } + if val == 0 { + return fmt.Errorf("amount must be greater than zero") + } + return nil } - diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index bacf8cd2..042c2273 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -26,12 +26,12 @@ type Client interface { GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error - // GetBalance returns the bank balance for the given address and denom. - GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) - // GetActionParams returns the action module parameters. - GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) - // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. - GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + // GetBalance returns the bank balance for the given address and denom. + GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) + // GetActionParams returns the action module parameters. + GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) + // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) } // SuperNodeInfo contains supernode information with latest address @@ -224,20 +224,20 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data, // GetActionParams fetches the action module parameters via the underlying lumera client. func (a *Adapter) GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) { - resp, err := a.client.Action().GetParams(ctx) - if err != nil { - return nil, fmt.Errorf("get action params: %w", err) - } - return resp, nil + resp, err := a.client.Action().GetParams(ctx) + if err != nil { + return nil, fmt.Errorf("get action params: %w", err) + } + return resp, nil } // GetActionFee fetches the action fee for a given data size (in KB). 
func (a *Adapter) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { - resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) - if err != nil { - return nil, fmt.Errorf("get action fee: %w", err) - } - return resp, nil + resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) + if err != nil { + return nil, fmt.Errorf("get action fee: %w", err) + } + return resp, nil } // GetBalance fetches the balance for a given address and denom via the underlying lumera client. diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go index 89e04cae..1ba82c8f 100644 --- a/sdk/adapters/supernodeservice/types.go +++ b/sdk/adapters/supernodeservice/types.go @@ -1,12 +1,12 @@ package supernodeservice import ( - "context" + "context" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "google.golang.org/grpc" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "google.golang.org/grpc" - "github.com/LumeraProtocol/supernode/v2/sdk/event" + "github.com/LumeraProtocol/supernode/v2/sdk/event" ) type LoggerFunc func( @@ -46,7 +46,7 @@ type CascadeSupernodeDownloadResponse struct { //go:generate mockery --name=CascadeServiceClient --output=testutil/mocks --outpkg=mocks --filename=cascade_service_mock.go type CascadeServiceClient interface { - CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) - GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) - CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) + CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) + CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) } diff --git a/sdk/event/keys.go b/sdk/event/keys.go index 04e27bd3..b138929f 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -30,5 +30,5 @@ const ( KeyTaskID EventDataKey = "task_id" KeyActionID EventDataKey = "action_id" - // Removed legacy cascade storage/retrieve metrics keys + // Removed legacy cascade storage/retrieve metrics keys ) diff --git a/sdk/event/types.go b/sdk/event/types.go index 635b1e2f..10f44856 100644 --- a/sdk/event/types.go +++ b/sdk/event/types.go @@ -14,7 +14,7 @@ type EventType string // These events are used to track the progress of tasks // and to notify subscribers about important changes in the system. 
const ( - SDKTaskStarted EventType = "sdk:started" + SDKTaskStarted EventType = "sdk:started" SDKSupernodesUnavailable EventType = "sdk:supernodes_unavailable" SDKSupernodesFound EventType = "sdk:supernodes_found" SDKRegistrationAttempt EventType = "sdk:registration_attempt" @@ -22,41 +22,41 @@ const ( SDKRegistrationSuccessful EventType = "sdk:registration_successful" SDKTaskTxHashReceived EventType = "sdk:txhash_received" SDKTaskCompleted EventType = "sdk:completed" - SDKTaskFailed EventType = "sdk:failed" - SDKConnectionEstablished EventType = "sdk:connection_established" - // Upload/processing phase events for cascade registration - SDKUploadStarted EventType = "sdk:upload_started" - SDKUploadCompleted EventType = "sdk:upload_completed" - SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout - SDKProcessingStarted EventType = "sdk:processing_started" - SDKProcessingFailed EventType = "sdk:processing_failed" - SDKProcessingTimeout EventType = "sdk:processing_timeout" + SDKTaskFailed EventType = "sdk:failed" + SDKConnectionEstablished EventType = "sdk:connection_established" + // Upload/processing phase events for cascade registration + SDKUploadStarted EventType = "sdk:upload_started" + SDKUploadCompleted EventType = "sdk:upload_completed" + SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout + SDKProcessingStarted EventType = "sdk:processing_started" + SDKProcessingFailed EventType = "sdk:processing_failed" + SDKProcessingTimeout EventType = "sdk:processing_timeout" - SDKDownloadAttempt EventType = "sdk:download_attempt" - SDKDownloadFailure EventType = "sdk:download_failure" - SDKDownloadStarted EventType = "sdk:download_started" - SDKDownloadCompleted EventType = "sdk:download_completed" + SDKDownloadAttempt EventType = "sdk:download_attempt" + SDKDownloadFailure EventType = "sdk:download_failure" + SDKDownloadStarted EventType = "sdk:download_started" + SDKDownloadCompleted EventType = "sdk:download_completed" ) const ( - SupernodeActionRetrieved EventType = "supernode:action_retrieved" - SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" - SupernodeTopCheckPassed EventType = "supernode:top_check_passed" - SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" - SupernodeDataHashVerified EventType = "supernode:data_hash_verified" - SupernodeInputEncoded EventType = "supernode:input_encoded" - SupernodeSignatureVerified EventType = "supernode:signature_verified" - SupernodeRQIDGenerated EventType = "supernode:rqid_generated" - SupernodeRQIDVerified EventType = "supernode:rqid_verified" - SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" - SupernodeArtefactsStored EventType = "supernode:artefacts_stored" - SupernodeActionFinalized EventType = "supernode:action_finalized" - SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" - SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" - SupernodeDecodeCompleted EventType = "supernode:decode_completed" - SupernodeServeReady EventType = "supernode:serve_ready" - SupernodeUnknown EventType = "supernode:unknown" - SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" + SupernodeActionRetrieved EventType = "supernode:action_retrieved" + SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" + SupernodeTopCheckPassed EventType = "supernode:top_check_passed" + SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" + SupernodeDataHashVerified 
EventType = "supernode:data_hash_verified" + SupernodeInputEncoded EventType = "supernode:input_encoded" + SupernodeSignatureVerified EventType = "supernode:signature_verified" + SupernodeRQIDGenerated EventType = "supernode:rqid_generated" + SupernodeRQIDVerified EventType = "supernode:rqid_verified" + SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" + SupernodeArtefactsStored EventType = "supernode:artefacts_stored" + SupernodeActionFinalized EventType = "supernode:action_finalized" + SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" + SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" + SupernodeDecodeCompleted EventType = "supernode:decode_completed" + SupernodeServeReady EventType = "supernode:serve_ready" + SupernodeUnknown EventType = "supernode:unknown" + SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" ) // EventData is a map of event data attributes using standardized keys diff --git a/sdk/net/client.go b/sdk/net/client.go index b88fe75b..96e5d7f5 100644 --- a/sdk/net/client.go +++ b/sdk/net/client.go @@ -1,12 +1,12 @@ package net import ( - "context" + "context" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // SupernodeClient defines the interface for communicating with supernodes @@ -16,7 +16,7 @@ type SupernodeClient interface { // HealthCheck performs a health check on the supernode HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error) - GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) // Download downloads the cascade action file Download(ctx context.Context, in *supernodeservice.CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*supernodeservice.CascadeSupernodeDownloadResponse, error) diff --git a/sdk/net/impl.go b/sdk/net/impl.go index cd6bf10f..77ac7de9 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -1,21 +1,21 @@ package net import ( - "context" - "fmt" - - "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" - ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" - "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "github.com/LumeraProtocol/supernode/v2/sdk/log" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + "context" + "fmt" + + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" + "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "github.com/LumeraProtocol/supernode/v2/sdk/log" + + pb 
"github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // supernodeClient implements the SupernodeClient interface @@ -130,13 +130,13 @@ func (c *supernodeClient) HealthCheck(ctx context.Context) (*grpc_health_v1.Heal } func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) { - resp, err := c.cascadeClient.GetSupernodeStatus(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get supernode status: %w", err) - } + resp, err := c.cascadeClient.GetSupernodeStatus(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get supernode status: %w", err) + } - c.logger.Debug(ctx, "Supernode status retrieved successfully") - return resp, nil + c.logger.Debug(ctx, "Supernode status retrieved successfully") + return resp, nil } // Download downloads the cascade action file diff --git a/sdk/task/timeouts.go b/sdk/task/timeouts.go index f6e1e7e6..4498fdaf 100644 --- a/sdk/task/timeouts.go +++ b/sdk/task/timeouts.go @@ -5,4 +5,3 @@ import "time" // connectionTimeout bounds supernode health/connection probing. // Keep this short to preserve snappy discovery without impacting long uploads. const connectionTimeout = 10 * time.Second - diff --git a/supernode/cascade/adaptors/lumera.go b/supernode/cascade/adaptors/lumera.go index 2bd4ad27..958e7701 100644 --- a/supernode/cascade/adaptors/lumera.go +++ b/supernode/cascade/adaptors/lumera.go @@ -1,48 +1,47 @@ package adaptors import ( - "context" + "context" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" ) type LumeraClient interface { - GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) - GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) - Verify(ctx context.Context, address string, msg []byte, sig []byte) error - GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) - SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) - FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) + GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) + GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) + Verify(ctx context.Context, address string, msg []byte, sig []byte) error + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) + FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) } -type lumeraImpl struct { c lumera.Client } +type lumeraImpl struct{ c lumera.Client } func NewLumeraClient(c lumera.Client) LumeraClient { return &lumeraImpl{c: c} } func (l *lumeraImpl) GetAction(ctx context.Context, actionID string) 
(*actiontypes.QueryGetActionResponse, error) { - return l.c.Action().GetAction(ctx, actionID) + return l.c.Action().GetAction(ctx, actionID) } func (l *lumeraImpl) GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { - return l.c.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) + return l.c.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) } func (l *lumeraImpl) Verify(ctx context.Context, address string, msg []byte, sig []byte) error { - return l.c.Auth().Verify(ctx, address, msg, sig) + return l.c.Auth().Verify(ctx, address, msg, sig) } func (l *lumeraImpl) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { - return l.c.Action().GetActionFee(ctx, dataSizeKB) + return l.c.Action().GetActionFee(ctx, dataSizeKB) } func (l *lumeraImpl) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { - return l.c.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) + return l.c.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) } func (l *lumeraImpl) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { - return l.c.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) + return l.c.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) } - diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/cascade/adaptors/p2p.go index 8f5e81bb..857df09a 100644 --- a/supernode/cascade/adaptors/p2p.go +++ b/supernode/cascade/adaptors/p2p.go @@ -1,116 +1,195 @@ package adaptors import ( - "context" - "fmt" - "io/fs" - "math" - "math/rand/v2" - "path/filepath" - "sort" - "strings" - "time" + "context" + "fmt" + "io/fs" + "math" + "math/rand/v2" + "path/filepath" + "sort" + "strings" + "time" - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) const ( - loadSymbolsBatchSize = 3000 - storeSymbolsPercent = 18 - storeBatchContextTimeout = 3 * time.Minute - P2PDataRaptorQSymbol = 1 + loadSymbolsBatchSize = 3000 + storeSymbolsPercent = 18 + storeBatchContextTimeout = 3 * time.Minute + P2PDataRaptorQSymbol = 1 ) -type P2PService interface { StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error } +type P2PService interface { + StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error +} -type p2pImpl struct { p2p p2p.Client; rqStore rqstore.Store } +type p2pImpl struct { + p2p p2p.Client + rqStore rqstore.Store +} -func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { return &p2pImpl{p2p: client, rqStore: store} } +func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { + return &p2pImpl{p2p: client, rqStore: store} +} -type StoreArtefactsRequest struct { TaskID string; ActionID string; IDFiles [][]byte; SymbolsDir string } +type StoreArtefactsRequest struct { + TaskID string + ActionID string + IDFiles [][]byte + SymbolsDir string +} func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "store: p2p start", 
logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) - start := time.Now() - firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) - if err != nil { return fmt.Errorf("error storing artefacts: %w", err) } - _ = firstPassSymbols; _ = totalSymbols; _ = start - remaining := 0 - if req.SymbolsDir != "" { if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { remaining = len(keys) } } - logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": time.Since(start).Milliseconds()}) - if remaining == 0 { logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) } - return nil + logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) + start := time.Now() + firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) + if err != nil { + return fmt.Errorf("error storing artefacts: %w", err) + } + _ = firstPassSymbols + _ = totalSymbols + _ = start + remaining := 0 + if req.SymbolsDir != "" { + if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { + remaining = len(keys) + } + } + logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": time.Since(start).Milliseconds()}) + if remaining == 0 { + logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) + } + return nil } func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { - if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { return 0, 0, fmt.Errorf("store symbol dir: %w", err) } - keys, err := walkSymbolTree(symbolsDir); if err != nil { return 0, 0, err } - totalAvailable := len(keys) - targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) - if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "store: target coverage", logtrace.Fields{"total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount}) - if len(keys) > loadSymbolsBatchSize { - want := targetCount - if want < len(keys) { rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }); keys = keys[:want] } - sort.Strings(keys) - } - logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) - totalSymbols := 0 - firstBatchProcessed := false - for start := 0; start < len(keys); { - end := min(start+loadSymbolsBatchSize, len(keys)) - batch := keys[start:end] - if !firstBatchProcessed && len(metadataFiles) > 0 { - roomForSymbols := loadSymbolsBatchSize - 
len(metadataFiles) - if roomForSymbols < 0 { roomForSymbols = 0 } - if roomForSymbols < len(batch) { batch = batch[:roomForSymbols]; end = start + roomForSymbols } - symBytes, err := utils.LoadSymbols(symbolsDir, batch); if err != nil { return 0, 0, fmt.Errorf("load symbols: %w", err) } - payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)); payload = append(payload, metadataFiles...); payload = append(payload, symBytes...) - logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) - bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout); err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID); cancel(); if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } - logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) - totalSymbols += len(symBytes) - if len(batch) > 0 { if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } - firstBatchProcessed = true - } else { - count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch); if err != nil { return totalSymbols, totalAvailable, err } - totalSymbols += count - } - start = end - } - if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } - return totalSymbols, totalAvailable, nil + if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { + return 0, 0, fmt.Errorf("store symbol dir: %w", err) + } + keys, err := walkSymbolTree(symbolsDir) + if err != nil { + return 0, 0, err + } + totalAvailable := len(keys) + targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) + if targetCount < 1 && totalAvailable > 0 { + targetCount = 1 + } + logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: target coverage", logtrace.Fields{"total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount}) + if len(keys) > loadSymbolsBatchSize { + want := targetCount + if want < len(keys) { + rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) + keys = keys[:want] + } + sort.Strings(keys) + } + logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) + totalSymbols := 0 + firstBatchProcessed := false + for start := 0; start < len(keys); { + end := min(start+loadSymbolsBatchSize, len(keys)) + batch := keys[start:end] + if !firstBatchProcessed && len(metadataFiles) > 0 { + roomForSymbols := loadSymbolsBatchSize - len(metadataFiles) + if roomForSymbols < 0 { + roomForSymbols = 0 + } + if roomForSymbols < len(batch) { + batch = batch[:roomForSymbols] + end = start + roomForSymbols + } + symBytes, err := utils.LoadSymbols(symbolsDir, batch) + if err != nil { + return 0, 0, fmt.Errorf("load symbols: %w", err) + } + payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)) + payload = append(payload, metadataFiles...) + payload = append(payload, symBytes...) 
+ logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) + bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) + err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID) + cancel() + if err != nil { + return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) + } + logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) + totalSymbols += len(symBytes) + if len(batch) > 0 { + if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { + return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) + } + } + firstBatchProcessed = true + } else { + count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) + if err != nil { + return totalSymbols, totalAvailable, err + } + totalSymbols += count + } + start = end + } + if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { + return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) + } + return totalSymbols, totalAvailable, nil } func walkSymbolTree(root string) ([]string, error) { - var keys []string - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { return err } - if d.IsDir() { return nil } - if strings.EqualFold(filepath.Ext(d.Name()), ".json") { return nil } - rel, err := filepath.Rel(root, path); if err != nil { return err } - keys = append(keys, rel) - return nil - }) - if err != nil { return nil, fmt.Errorf("walk symbol tree: %w", err) } - return keys, nil + var keys []string + err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + if strings.EqualFold(filepath.Ext(d.Name()), ".json") { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + keys = append(keys, rel) + return nil + }) + if err != nil { + return nil, fmt.Errorf("walk symbol tree: %w", err) + } + return keys, nil } func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) - symbols, err := utils.LoadSymbols(root, fileKeys); if err != nil { return 0, fmt.Errorf("load symbols: %w", err) } - symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout); defer cancel() - logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) - if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } - return len(symbols), nil + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) + symbols, err := utils.LoadSymbols(root, fileKeys) + if err != nil { + return 0, fmt.Errorf("load symbols: %w", err) + } + symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) + defer cancel() + logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": 
len(symbols)}) + if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { + return len(symbols), fmt.Errorf("p2p store batch: %w", err) + } + logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) + if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { + return len(symbols), fmt.Errorf("delete symbols: %w", err) + } + return len(symbols), nil } -func min(a, b int) int { if a < b { return a }; return b } - +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/supernode/cascade/adaptors/rq.go b/supernode/cascade/adaptors/rq.go index a5ef9389..a6fd4250 100644 --- a/supernode/cascade/adaptors/rq.go +++ b/supernode/cascade/adaptors/rq.go @@ -1,31 +1,31 @@ package adaptors import ( - "context" + "context" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" ) // CodecService wraps codec operations used by cascade type CodecService interface { - EncodeInput(ctx context.Context, actionID string, path string, dataSize int) (EncodeResult, error) - Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) + EncodeInput(ctx context.Context, actionID string, path string, dataSize int) (EncodeResult, error) + Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) } type EncodeResult struct { - SymbolsDir string - Metadata codec.Layout + SymbolsDir string + Metadata codec.Layout } type DecodeRequest struct { - ActionID string - Symbols map[string][]byte - Layout codec.Layout + ActionID string + Symbols map[string][]byte + Layout codec.Layout } type DecodeResult struct { - FilePath string - DecodeTmpDir string + FilePath string + DecodeTmpDir string } type codecImpl struct{ codec codec.Codec } @@ -33,14 +33,17 @@ type codecImpl struct{ codec codec.Codec } func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } func (c *codecImpl) EncodeInput(ctx context.Context, actionID, path string, dataSize int) (EncodeResult, error) { - res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path, DataSize: dataSize}) - if err != nil { return EncodeResult{}, err } - return EncodeResult{SymbolsDir: res.SymbolsDir, Metadata: res.Metadata}, nil + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path, DataSize: dataSize}) + if err != nil { + return EncodeResult{}, err + } + return EncodeResult{SymbolsDir: res.SymbolsDir, Metadata: res.Metadata}, nil } func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) { - res, err := c.codec.Decode(ctx, codec.DecodeRequest{ActionID: req.ActionID, Symbols: req.Symbols, Layout: req.Layout}) - if err != nil { return DecodeResult{}, err } - return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil + res, err := c.codec.Decode(ctx, codec.DecodeRequest{ActionID: req.ActionID, Symbols: req.Symbols, Layout: req.Layout}) + if err != nil { + return DecodeResult{}, err + } + return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil } - diff --git a/supernode/cascade/config.go b/supernode/cascade/config.go index c2b63822..f6d9b5f3 100644 --- a/supernode/cascade/config.go +++ b/supernode/cascade/config.go @@ -2,9 +2,9 @@ package cascade // Config contains settings for the cascade service type Config struct { - // SupernodeAccountAddress is the on-chain account address of this supernode. 
- SupernodeAccountAddress string `mapstructure:"-" json:"-"` + // SupernodeAccountAddress is the on-chain account address of this supernode. + SupernodeAccountAddress string `mapstructure:"-" json:"-"` - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` + RaptorQServiceAddress string `mapstructure:"-" json:"-"` + RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` } diff --git a/supernode/cascade/events.go b/supernode/cascade/events.go index 0b25d3b8..f1314a1a 100644 --- a/supernode/cascade/events.go +++ b/supernode/cascade/events.go @@ -3,23 +3,23 @@ package cascade type SupernodeEventType int const ( - SupernodeEventTypeUNKNOWN SupernodeEventType = 0 - SupernodeEventTypeActionRetrieved SupernodeEventType = 1 - SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 - SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 - SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 - SupernodeEventTypeDataHashVerified SupernodeEventType = 5 - SupernodeEventTypeInputEncoded SupernodeEventType = 6 - SupernodeEventTypeSignatureVerified SupernodeEventType = 7 - SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 - SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 - SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 - SupernodeEventTypeArtefactsStored SupernodeEventType = 11 - SupernodeEventTypeActionFinalized SupernodeEventType = 12 - SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 - SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 - // Download phase markers - SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 - SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 - SupernodeEventTypeServeReady SupernodeEventType = 17 + SupernodeEventTypeUNKNOWN SupernodeEventType = 0 + SupernodeEventTypeActionRetrieved SupernodeEventType = 1 + SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 + SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 + SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 + SupernodeEventTypeDataHashVerified SupernodeEventType = 5 + SupernodeEventTypeInputEncoded SupernodeEventType = 6 + SupernodeEventTypeSignatureVerified SupernodeEventType = 7 + SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 + SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 + SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 + SupernodeEventTypeArtefactsStored SupernodeEventType = 11 + SupernodeEventTypeActionFinalized SupernodeEventType = 12 + SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 + SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 + // Download phase markers + SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 + SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 + SupernodeEventTypeServeReady SupernodeEventType = 17 ) diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index 898d1779..75315a09 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -1,24 +1,24 @@ package cascade import ( - "context" - "encoding/base64" - "strconv" + "context" + "encoding/base64" + "strconv" - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - 
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" - sdk "github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + sdk "github.com/cosmos/cosmos-sdk/types" + json "github.com/json-iterator/go" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { @@ -79,14 +79,14 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) } - layoutJSON, err := json.Marshal(encodedMeta) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) - } - layoutB64 := utils.B64Encode(layoutJSON) - if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) - } + layoutJSON, err := json.Marshal(encodedMeta) + if err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) + } + layoutB64 := utils.B64Encode(layoutJSON) + if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { + return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) + } logtrace.Debug(ctx, "layout signature successfully verified", f) return encodedMeta, indexFile.LayoutSignature, nil } diff --git a/supernode/cascade/interfaces.go b/supernode/cascade/interfaces.go index 31055a17..e782bc23 100644 --- a/supernode/cascade/interfaces.go +++ b/supernode/cascade/interfaces.go @@ -1,19 +1,19 @@ package cascade import ( - "context" + "context" ) // CascadeServiceFactory defines an interface to create cascade tasks // //go:generate mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go type CascadeServiceFactory interface { - NewCascadeRegistrationTask() CascadeTask + NewCascadeRegistrationTask() CascadeTask } // CascadeTask interface defines operations for cascade registration and data management type CascadeTask interface { - Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error - Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error - CleanupDownload(ctx context.Context, actionID string) error + Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error + Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error + CleanupDownload(ctx context.Context, actionID string) error } diff --git a/supernode/cascade/register.go 
b/supernode/cascade/register.go index 74d93c62..a54649be 100644 --- a/supernode/cascade/register.go +++ b/supernode/cascade/register.go @@ -1,122 +1,142 @@ package cascade import ( - "context" - "os" + "context" + "os" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) // RegisterRequest contains parameters for upload request type RegisterRequest struct { - TaskID string - ActionID string - DataHash []byte - DataSize int - FilePath string + TaskID string + ActionID string + DataHash []byte + DataSize int + FilePath string } // RegisterResponse contains the result of upload type RegisterResponse struct { - EventType SupernodeEventType - Message string - TxHash string + EventType SupernodeEventType + Message string + TxHash string } func (task *CascadeRegistrationTask) Register( - ctx context.Context, - req *RegisterRequest, - send func(resp *RegisterResponse) error, + ctx context.Context, + req *RegisterRequest, + send func(resp *RegisterResponse) error, ) (err error) { - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - task.taskID = req.TaskID - } - - fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "register: request", fields) - defer func() { - if req != nil && req.FilePath != "" { - if remErr := os.RemoveAll(req.FilePath); remErr != nil { - logtrace.Warn(ctx, "Failed to remove uploaded file", fields) - } else { - logtrace.Debug(ctx, "Uploaded file cleaned up", fields) - } - } - }() - - action, err := task.fetchAction(ctx, req.ActionID, fields) - if err != nil { return err } - fields[logtrace.FieldBlockHeight] = action.BlockHeight - fields[logtrace.FieldCreator] = action.Creator - fields[logtrace.FieldStatus] = action.State - fields[logtrace.FieldPrice] = action.Price - logtrace.Info(ctx, "register: action fetched", fields) - task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - - if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } - logtrace.Info(ctx, "register: fee verified", fields) - task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - - fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress - if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } - logtrace.Info(ctx, "register: top supernode confirmed", fields) - task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - - cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) - if err != nil { return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) } - logtrace.Info(ctx, "register: metadata decoded", fields) - task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - - if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { return err } - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) - logtrace.Info(ctx, "register: data hash matched", fields) - task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) - if err 
!= nil { return err } - fields["symbols_dir"] = encResp.SymbolsDir - logtrace.Info(ctx, "register: input encoded", fields) - task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - - layout, signature, err := task.verifySignatureAndDecodeLayout(ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields) - if err != nil { return err } - logtrace.Info(ctx, "register: signature verified", fields) - task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) - if err != nil { return err } - fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) - logtrace.Info(ctx, "register: rqid files generated", fields) - task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) - - if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } - logtrace.Info(ctx, "register: rqids validated", fields) - task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize simulation failed", fields) - task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } - logtrace.Info(ctx, "register: finalize simulation passed", fields) - task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - - if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) - - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize action error", fields) - return task.wrapErr(ctx, "failed to finalize action", err, fields) - } - txHash := resp.TxResponse.TxHash - fields[logtrace.FieldTxHash] = txHash - logtrace.Info(ctx, "register: action finalized", fields) - task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) - return nil + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID + } + + fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} + logtrace.Info(ctx, "register: request", fields) + defer func() { + if req != nil && req.FilePath != "" { + if remErr := os.RemoveAll(req.FilePath); remErr != nil { + logtrace.Warn(ctx, "Failed to remove uploaded file", fields) + } else { + logtrace.Debug(ctx, "Uploaded file cleaned up", fields) + } + } + }() + + action, err := task.fetchAction(ctx, req.ActionID, fields) + if err != nil { + return err + } + fields[logtrace.FieldBlockHeight] = action.BlockHeight + fields[logtrace.FieldCreator] = action.Creator + fields[logtrace.FieldStatus] = action.State + fields[logtrace.FieldPrice] = action.Price + logtrace.Info(ctx, "register: action fetched", fields) + task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) + + if err := task.verifyActionFee(ctx, action, req.DataSize, 
fields); err != nil { + return err + } + logtrace.Info(ctx, "register: fee verified", fields) + task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) + + fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress + if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { + return err + } + logtrace.Info(ctx, "register: top supernode confirmed", fields) + task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) + + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) + if err != nil { + return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) + } + logtrace.Info(ctx, "register: metadata decoded", fields) + task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) + + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { + return err + } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "register: data hash matched", fields) + task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) + + encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) + if err != nil { + return err + } + fields["symbols_dir"] = encResp.SymbolsDir + logtrace.Info(ctx, "register: input encoded", fields) + task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) + + layout, signature, err := task.verifySignatureAndDecodeLayout(ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields) + if err != nil { + return err + } + logtrace.Info(ctx, "register: signature verified", fields) + task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) + + rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) + if err != nil { + return err + } + fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) + logtrace.Info(ctx, "register: rqid files generated", fields) + task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) + + if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { + return task.wrapErr(ctx, "failed to verify IDs", err, fields) + } + logtrace.Info(ctx, "register: rqids validated", fields) + task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) + + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize simulation failed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } + logtrace.Info(ctx, "register: finalize simulation passed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) + + if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { + return err + } + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize action error", fields) + return task.wrapErr(ctx, "failed to 
finalize action", err, fields) + } + txHash := resp.TxResponse.TxHash + fields[logtrace.FieldTxHash] = txHash + logtrace.Info(ctx, "register: action finalized", fields) + task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) + return nil } diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go index 49dde1f1..21197c86 100644 --- a/supernode/cascade/service.go +++ b/supernode/cascade/service.go @@ -1,22 +1,22 @@ package cascade import ( - "context" + "context" - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" ) type CascadeService struct { - config *Config + config *Config - LumeraClient adaptors.LumeraClient - P2P adaptors.P2PService - RQ adaptors.CodecService - P2PClient p2p.Client + LumeraClient adaptors.LumeraClient + P2P adaptors.P2PService + RQ adaptors.CodecService + P2PClient p2p.Client } // Compile-time checks to ensure CascadeService implements required interfaces @@ -24,8 +24,8 @@ var _ CascadeServiceFactory = (*CascadeService)(nil) // NewCascadeRegistrationTask creates a new task for cascade registration func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { - task := NewCascadeRegistrationTask(service) - return task + task := NewCascadeRegistrationTask(service) + return task } // Run starts the service (no background workers) @@ -33,12 +33,11 @@ func (service *CascadeService) Run(ctx context.Context) error { <-ctx.Done(); re // NewCascadeService returns a new CascadeService instance func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - P2PClient: p2pClient, - } + return &CascadeService{ + config: config, + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + P2PClient: p2pClient, + } } - diff --git a/supernode/cmd/config_update.go b/supernode/cmd/config_update.go index 91807962..3b3ff255 100644 --- a/supernode/cmd/config_update.go +++ b/supernode/cmd/config_update.go @@ -8,8 +8,8 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/spf13/cobra" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/spf13/cobra" ) // configUpdateCmd represents the config update command @@ -51,7 +51,7 @@ func promptParameterSelection() (string, error) { Message: "Select parameter to update:", Options: []string{ "Supernode IP Address", - "Supernode Port", + "Supernode Port", "Lumera GRPC Address", "Chain ID", "Key Name", @@ -197,7 +197,7 @@ func updateKeyringBackend() error { // Show warning fmt.Println("⚠️ WARNING: Changing keyring backend will switch to a different keyring.") fmt.Println("You will need to select a key from the 
new keyring or recover one.") - + var proceed bool confirmPrompt := &survey.Confirm{ Message: "Do you want to continue?", @@ -225,14 +225,14 @@ func updateKeyringBackend() error { // Update keyring backend in config appConfig.KeyringConfig.Backend = backend - + // Save config with new keyring backend if err := saveConfig(); err != nil { return err } fmt.Printf("Updated keyring backend to: %s\n", backend) - + // Reload config to get the new keyring settings cfgFile := filepath.Join(baseDir, DefaultConfigFile) reloadedConfig, err := config.LoadConfig(cfgFile, baseDir) @@ -269,7 +269,7 @@ func selectKeyFromNewKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) e func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) error { // Build options list with display format options := []string{} - + // Add existing keys for _, info := range keyInfos { addr, err := info.GetAddress() @@ -278,7 +278,7 @@ func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) erro } options = append(options, fmt.Sprintf("%s (%s)", info.Name, addr.String())) } - + // Always add option to recover new key options = append(options, "Add new key (recover from mnemonic)") @@ -316,4 +316,4 @@ func saveConfig() error { func init() { configCmd.AddCommand(configUpdateCmd) -} \ No newline at end of file +} diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go index 466eb657..8cd8708f 100644 --- a/supernode/cmd/service.go +++ b/supernode/cmd/service.go @@ -1,34 +1,33 @@ package cmd import ( - "context" - "reflect" + "context" + "reflect" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) type service interface { - Run(context.Context) error + Run(context.Context) error } func RunServices(ctx context.Context, services ...service) error { - group, ctx := errgroup.WithContext(ctx) - - for _, service := range services { - service := service - - group.Go(func() error { - err := service.Run(ctx) - if err != nil { - logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) - } else { - logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) - } - return err - }) - } - - return group.Wait() + group, ctx := errgroup.WithContext(ctx) + + for _, service := range services { + service := service + + group.Go(func() error { + err := service.Run(ctx) + if err != nil { + logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) + } else { + logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) + } + return err + }) + } + + return group.Wait() } - diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go index 1db1c3a3..d7915259 100644 --- a/supernode/config/defaults.go +++ b/supernode/config/defaults.go @@ -3,14 +3,13 @@ package config // Centralized default values for configuration const ( - DefaultKeyringBackend = "test" - DefaultKeyringDir = "keys" - DefaultKeyName = "test-key" - DefaultSupernodeHost = "0.0.0.0" - DefaultSupernodePort = 4444 - DefaultP2PPort = 4445 - DefaultLumeraGRPC = "localhost:9090" - DefaultChainID = "testing" - DefaultRaptorQFilesDir = "raptorq_files" + DefaultKeyringBackend = "test" + DefaultKeyringDir = "keys" + DefaultKeyName = "test-key" + 
DefaultSupernodeHost = "0.0.0.0" + DefaultSupernodePort = 4444 + DefaultP2PPort = 4445 + DefaultLumeraGRPC = "localhost:9090" + DefaultChainID = "testing" + DefaultRaptorQFilesDir = "raptorq_files" ) - diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index 139ba8b3..9d75e812 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -1,311 +1,315 @@ package cascade import ( - "encoding/hex" - "fmt" - "hash" - "io" - "os" - "path/filepath" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" - "lukechampine.com/blake3" + "encoding/hex" + "fmt" + "hash" + "io" + "os" + "path/filepath" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" + "lukechampine.com/blake3" ) -type ActionServer struct { pb.UnimplementedCascadeServiceServer; factory cascadeService.CascadeServiceFactory } - +type ActionServer struct { + pb.UnimplementedCascadeServiceServer + factory cascadeService.CascadeServiceFactory +} // NewCascadeActionServer creates a new CascadeActionServer with injected service -func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { return &ActionServer{factory: factory} } +func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { + return &ActionServer{factory: factory} +} // calculateOptimalChunkSize returns an optimal chunk size based on file size // to balance throughput and memory usage func calculateOptimalChunkSize(fileSize int64) int { - const ( - minChunkSize = 64 * 1024 // 64 KB minimum - maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files - smallFileThreshold = 1024 * 1024 // 1 MB - mediumFileThreshold = 50 * 1024 * 1024 // 50 MB - largeFileThreshold = 500 * 1024 * 1024 // 500 MB - ) - - var chunkSize int - - switch { - case fileSize <= smallFileThreshold: - chunkSize = minChunkSize - case fileSize <= mediumFileThreshold: - chunkSize = 256 * 1024 - case fileSize <= largeFileThreshold: - chunkSize = 1024 * 1024 - default: - chunkSize = maxChunkSize - } - - if chunkSize < minChunkSize { - chunkSize = minChunkSize - } - if chunkSize > maxChunkSize { - chunkSize = maxChunkSize - } - return chunkSize + const ( + minChunkSize = 64 * 1024 // 64 KB minimum + maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files + smallFileThreshold = 1024 * 1024 // 1 MB + mediumFileThreshold = 50 * 1024 * 1024 // 50 MB + largeFileThreshold = 500 * 1024 * 1024 // 500 MB + ) + + var chunkSize int + + switch { + case fileSize <= smallFileThreshold: + chunkSize = minChunkSize + case fileSize <= mediumFileThreshold: + chunkSize = 256 * 1024 + case fileSize <= largeFileThreshold: + chunkSize = 1024 * 1024 + default: + chunkSize = maxChunkSize + } + + if chunkSize < minChunkSize { + chunkSize = minChunkSize + } + if chunkSize > maxChunkSize { + chunkSize = maxChunkSize + } + return chunkSize } func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Register", - logtrace.FieldModule: "CascadeActionServer", - } - - ctx := 
stream.Context() - logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) - - const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit - - var ( - metadata *pb.Metadata - totalSize int - ) - - hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) - return fmt.Errorf("initializing hasher and temp file: %w", err) - } - defer func(tempFile *os.File) { - err := tempFile.Close() - if err != nil && !errors.Is(err, os.ErrClosed) { - fields[logtrace.FieldError] = err.Error() - logtrace.Warn(ctx, "error closing temp file", fields) - } - }(tempFile) - - for { - req, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "error receiving stream data", fields) - return fmt.Errorf("failed to receive stream data: %w", err) - } - - switch x := req.RequestType.(type) { - case *pb.RegisterRequest_Chunk: - if x.Chunk != nil { - if _, err := hasher.Write(x.Chunk.Data); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to hasher", fields) - return fmt.Errorf("hashing error: %w", err) - } - if _, err := tempFile.Write(x.Chunk.Data); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to file", fields) - return fmt.Errorf("file write error: %w", err) - } - totalSize += len(x.Chunk.Data) - if totalSize > maxFileSize { - fields[logtrace.FieldError] = "file size exceeds 1GB limit" - fields["total_size"] = totalSize - logtrace.Error(ctx, "upload rejected: file too large", fields) - return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) - } - logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize}) - } - case *pb.RegisterRequest_Metadata: - metadata = x.Metadata - logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) - } - } - - if metadata == nil { - logtrace.Error(ctx, "no metadata received in stream", fields) - return fmt.Errorf("no metadata received") - } - fields[logtrace.FieldTaskID] = metadata.GetTaskId() - fields[logtrace.FieldActionID] = metadata.GetActionId() - logtrace.Debug(ctx, "metadata received from action-sdk", fields) - - if err := tempFile.Sync(); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to sync temp file", fields) - return fmt.Errorf("failed to sync temp file: %w", err) - } - - hash := hasher.Sum(nil) - hashHex := hex.EncodeToString(hash) - fields[logtrace.FieldHashHex] = hashHex - logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) - - targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) - return fmt.Errorf("failed to replace temp dir with task dir: %w", err) - } - - task := server.factory.NewCascadeRegistrationTask() - err = task.Register(ctx, &cascadeService.RegisterRequest{ - TaskID: metadata.TaskId, - ActionID: metadata.ActionId, - DataHash: hash, - DataSize: totalSize, - FilePath: targetPath, - }, func(resp *cascadeService.RegisterResponse) error { - grpcResp := &pb.RegisterResponse{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: 
resp.Message, - TxHash: resp.TxHash, - } - if err := stream.Send(grpcResp); err != nil { - logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - return nil - }) - if err != nil { - logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()}) - return fmt.Errorf("registration failed: %w", err) - } - logtrace.Debug(ctx, "cascade registration completed successfully", fields) - return nil + fields := logtrace.Fields{ + logtrace.FieldMethod: "Register", + logtrace.FieldModule: "CascadeActionServer", + } + + ctx := stream.Context() + logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) + + const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit + + var ( + metadata *pb.Metadata + totalSize int + ) + + hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) + return fmt.Errorf("initializing hasher and temp file: %w", err) + } + defer func(tempFile *os.File) { + err := tempFile.Close() + if err != nil && !errors.Is(err, os.ErrClosed) { + fields[logtrace.FieldError] = err.Error() + logtrace.Warn(ctx, "error closing temp file", fields) + } + }(tempFile) + + for { + req, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "error receiving stream data", fields) + return fmt.Errorf("failed to receive stream data: %w", err) + } + + switch x := req.RequestType.(type) { + case *pb.RegisterRequest_Chunk: + if x.Chunk != nil { + if _, err := hasher.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to hasher", fields) + return fmt.Errorf("hashing error: %w", err) + } + if _, err := tempFile.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to file", fields) + return fmt.Errorf("file write error: %w", err) + } + totalSize += len(x.Chunk.Data) + if totalSize > maxFileSize { + fields[logtrace.FieldError] = "file size exceeds 1GB limit" + fields["total_size"] = totalSize + logtrace.Error(ctx, "upload rejected: file too large", fields) + return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) + } + logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize}) + } + case *pb.RegisterRequest_Metadata: + metadata = x.Metadata + logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) + } + } + + if metadata == nil { + logtrace.Error(ctx, "no metadata received in stream", fields) + return fmt.Errorf("no metadata received") + } + fields[logtrace.FieldTaskID] = metadata.GetTaskId() + fields[logtrace.FieldActionID] = metadata.GetActionId() + logtrace.Debug(ctx, "metadata received from action-sdk", fields) + + if err := tempFile.Sync(); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to sync temp file", fields) + return fmt.Errorf("failed to sync temp file: %w", err) + } + + hash := hasher.Sum(nil) + hashHex := hex.EncodeToString(hash) + fields[logtrace.FieldHashHex] = hashHex + logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) + + targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), 
tempFilePath, tempFile) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) + return fmt.Errorf("failed to replace temp dir with task dir: %w", err) + } + + task := server.factory.NewCascadeRegistrationTask() + err = task.Register(ctx, &cascadeService.RegisterRequest{ + TaskID: metadata.TaskId, + ActionID: metadata.ActionId, + DataHash: hash, + DataSize: totalSize, + FilePath: targetPath, + }, func(resp *cascadeService.RegisterResponse) error { + grpcResp := &pb.RegisterResponse{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + TxHash: resp.TxHash, + } + if err := stream.Send(grpcResp); err != nil { + logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } + return nil + }) + if err != nil { + logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()}) + return fmt.Errorf("registration failed: %w", err) + } + logtrace.Debug(ctx, "cascade registration completed successfully", fields) + return nil } func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { - ctx := stream.Context() - fields := logtrace.Fields{ - logtrace.FieldMethod: "Download", - logtrace.FieldModule: "CascadeActionServer", - logtrace.FieldActionID: req.GetActionId(), - } - logtrace.Debug(ctx, "download request received", fields) - - // Prepare to capture decoded file path from task events - var decodedFilePath string - var tmpDir string - - task := server.factory.NewCascadeRegistrationTask() - // Run cascade task Download; stream events back to client - err := task.Download(ctx, &cascadeService.DownloadRequest{ActionID: req.GetActionId(), Signature: req.GetSignature()}, func(resp *cascadeService.DownloadResponse) error { - // Forward event to gRPC client - evt := &pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - }, - }, - } - if sendErr := stream.Send(evt); sendErr != nil { - return sendErr - } - // Capture decode-completed info for streaming - if resp.EventType == cascadeService.SupernodeEventTypeDecodeCompleted { - decodedFilePath = resp.FilePath - tmpDir = resp.DownloadedDir - } - return nil - }) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "download task failed", fields) - return fmt.Errorf("download task failed: %w", err) - } - - if decodedFilePath == "" { - logtrace.Warn(ctx, "decode completed without file path", fields) - return nil - } - - // Notify client that server is ready to stream the file - if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { - return fmt.Errorf("send serve-ready: %w", err) - } - - // Stream file content in chunks - fi, err := os.Stat(decodedFilePath) - if err != nil { - return fmt.Errorf("stat decoded file: %w", err) - } - chunkSize := calculateOptimalChunkSize(fi.Size()) - f, err := os.Open(decodedFilePath) - if err != nil { - return fmt.Errorf("open decoded file: %w", err) - } - defer f.Close() - - buf := make([]byte, chunkSize) - for { - n, rerr := f.Read(buf) - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Chunk{Chunk: &pb.DataChunk{Data: append([]byte(nil), 
buf[:n]...)}}}); err != nil { - return fmt.Errorf("send chunk: %w", err) - } - } - if rerr == io.EOF { - break - } - if rerr != nil { - return fmt.Errorf("read decoded file: %w", rerr) - } - } - - // Cleanup temp directory if provided - if tmpDir != "" { - if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { - logtrace.Warn(ctx, "cleanup of tmp dir failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) - } - } - - logtrace.Debug(ctx, "download stream completed", fields) - return nil + ctx := stream.Context() + fields := logtrace.Fields{ + logtrace.FieldMethod: "Download", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldActionID: req.GetActionId(), + } + logtrace.Debug(ctx, "download request received", fields) + + // Prepare to capture decoded file path from task events + var decodedFilePath string + var tmpDir string + + task := server.factory.NewCascadeRegistrationTask() + // Run cascade task Download; stream events back to client + err := task.Download(ctx, &cascadeService.DownloadRequest{ActionID: req.GetActionId(), Signature: req.GetSignature()}, func(resp *cascadeService.DownloadResponse) error { + // Forward event to gRPC client + evt := &pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Event{ + Event: &pb.DownloadEvent{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + }, + }, + } + if sendErr := stream.Send(evt); sendErr != nil { + return sendErr + } + // Capture decode-completed info for streaming + if resp.EventType == cascadeService.SupernodeEventTypeDecodeCompleted { + decodedFilePath = resp.FilePath + tmpDir = resp.DownloadedDir + } + return nil + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "download task failed", fields) + return fmt.Errorf("download task failed: %w", err) + } + + if decodedFilePath == "" { + logtrace.Warn(ctx, "decode completed without file path", fields) + return nil + } + + // Notify client that server is ready to stream the file + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { + return fmt.Errorf("send serve-ready: %w", err) + } + + // Stream file content in chunks + fi, err := os.Stat(decodedFilePath) + if err != nil { + return fmt.Errorf("stat decoded file: %w", err) + } + chunkSize := calculateOptimalChunkSize(fi.Size()) + f, err := os.Open(decodedFilePath) + if err != nil { + return fmt.Errorf("open decoded file: %w", err) + } + defer f.Close() + + buf := make([]byte, chunkSize) + for { + n, rerr := f.Read(buf) + if n > 0 { + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Chunk{Chunk: &pb.DataChunk{Data: append([]byte(nil), buf[:n]...)}}}); err != nil { + return fmt.Errorf("send chunk: %w", err) + } + } + if rerr == io.EOF { + break + } + if rerr != nil { + return fmt.Errorf("read decoded file: %w", rerr) + } + } + + // Cleanup temp directory if provided + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + + logtrace.Debug(ctx, "download stream completed", fields) + return nil } // initializeHasherAndTempFile prepares a hasher and a temporary file to stream upload data into. 
func initializeHasherAndTempFile() (hash.Hash, *os.File, string, error) { - // Create a temp directory for the upload - tmpDir, err := os.MkdirTemp("", "supernode-upload-*") - if err != nil { - return nil, nil, "", fmt.Errorf("create temp dir: %w", err) - } - - // Create a file within the temp directory - filePath := filepath.Join(tmpDir, "data.bin") - f, err := os.Create(filePath) - if err != nil { - return nil, nil, "", fmt.Errorf("create temp file: %w", err) - } - - // Create a BLAKE3 hasher (32 bytes output) - hasher := blake3.New(32, nil) - return hasher, f, filePath, nil + // Create a temp directory for the upload + tmpDir, err := os.MkdirTemp("", "supernode-upload-*") + if err != nil { + return nil, nil, "", fmt.Errorf("create temp dir: %w", err) + } + + // Create a file within the temp directory + filePath := filepath.Join(tmpDir, "data.bin") + f, err := os.Create(filePath) + if err != nil { + return nil, nil, "", fmt.Errorf("create temp file: %w", err) + } + + // Create a BLAKE3 hasher (32 bytes output) + hasher := blake3.New(32, nil) + return hasher, f, filePath, nil } // replaceTempDirWithTaskDir moves the uploaded file into a task-scoped directory // and returns the new absolute path. func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (string, error) { - // Ensure data is flushed - _ = tempFile.Sync() - // Close now; deferred close may run later and is safe to ignore - _ = tempFile.Close() - - // Create a stable target directory under OS temp - targetDir := filepath.Join(os.TempDir(), "supernode", "uploads", taskID) - if err := os.MkdirAll(targetDir, 0700); err != nil { - return "", fmt.Errorf("create task dir: %w", err) - } - - newPath := filepath.Join(targetDir, filepath.Base(tempFilePath)) - if err := os.Rename(tempFilePath, newPath); err != nil { - return "", fmt.Errorf("move uploaded file: %w", err) - } - - // Attempt to cleanup the original temp directory - _ = os.RemoveAll(filepath.Dir(tempFilePath)) - return newPath, nil + // Ensure data is flushed + _ = tempFile.Sync() + // Close now; deferred close may run later and is safe to ignore + _ = tempFile.Close() + + // Create a stable target directory under OS temp + targetDir := filepath.Join(os.TempDir(), "supernode", "uploads", taskID) + if err := os.MkdirAll(targetDir, 0700); err != nil { + return "", fmt.Errorf("create task dir: %w", err) + } + + newPath := filepath.Join(targetDir, filepath.Base(tempFilePath)) + if err := os.Rename(tempFilePath, newPath); err != nil { + return "", fmt.Errorf("move uploaded file: %w", err) + } + + // Attempt to cleanup the original temp directory + _ = os.RemoveAll(filepath.Dir(tempFilePath)) + return newPath, nil } diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go index 8e414cca..7c9c22bf 100644 --- a/supernode/transport/grpc/status/handler.go +++ b/supernode/transport/grpc/status/handler.go @@ -1,25 +1,24 @@ package server import ( - "context" + "context" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" ) // SupernodeServer implements the SupernodeService gRPC service type SupernodeServer struct { - pb.UnimplementedSupernodeServiceServer - statusService *statussvc.SupernodeStatusService + pb.UnimplementedSupernodeServiceServer + statusService *statussvc.SupernodeStatusService } - // 
NewSupernodeServer creates a new SupernodeServer func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *SupernodeServer { - return &SupernodeServer{statusService: statusService} + return &SupernodeServer{statusService: statusService} } // GetStatus implements SupernodeService.GetStatus func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { - return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) + return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) } diff --git a/supernode/transport/grpc/status/server.go b/supernode/transport/grpc/status/server.go index b06ae9df..b7938983 100644 --- a/supernode/transport/grpc/status/server.go +++ b/supernode/transport/grpc/status/server.go @@ -23,15 +23,15 @@ import ( // Server represents supernode server type Server struct { - identity string - listenAddrs string - port int - services []grpcserver.ServiceDesc - name string - kr keyring.Keyring - grpcServer *grpcserver.Server - lumeraClient lumera.Client - healthServer *health.Server + identity string + listenAddrs string + port int + services []grpcserver.ServiceDesc + name string + kr keyring.Keyring + grpcServer *grpcserver.Server + lumeraClient lumera.Client + healthServer *health.Server } // Run starts the server @@ -45,12 +45,12 @@ func (server *Server) Run(ctx context.Context) error { // Set up gRPC logging logtrace.SetGRPCLogger() - logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.identity}) - logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.listenAddrs}) + logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.identity}) + logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.listenAddrs}) group, ctx := errgroup.WithContext(ctx) - addresses := strings.Split(server.listenAddrs, ",") + addresses := strings.Split(server.listenAddrs, ",") if err := server.setupGRPCServer(); err != nil { logtrace.Fatal(ctx, "Failed to setup gRPC server", logtrace.Fields{logtrace.FieldModule: "server", logtrace.FieldError: err.Error()}) } @@ -67,7 +67,7 @@ func (server *Server) Run(ctx context.Context) error { opts.WriteBufferSize = (8 * 1024 * 1024) // 8MB TCP buffer for _, address := range addresses { - addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.port)) + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.port)) address := addr // Create a new variable to avoid closure issues group.Go(func() error { @@ -81,14 +81,14 @@ func (server *Server) Run(ctx context.Context) error { func (server *Server) setupGRPCServer() error { // Create server credentials - serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ - CommonOptions: ltc.CommonOptions{ - Keyring: server.kr, - LocalIdentity: server.identity, - PeerType: securekeyx.Supernode, - Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), - }, - }) + serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ + CommonOptions: ltc.CommonOptions{ + Keyring: server.kr, + LocalIdentity: server.identity, + PeerType: securekeyx.Supernode, + Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), + }, + }) if err != nil { return fmt.Errorf("failed to create server credentials: %w", err) } @@ -143,16 +143,16 @@ func (server *Server) Close() { // New returns a new 
Server instance. func New(identity, listenAddrs string, port int, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...grpcserver.ServiceDesc) (*Server, error) { - if listenAddrs == "" { - return nil, fmt.Errorf("listen addresses cannot be empty") - } - return &Server{ - identity: identity, - listenAddrs: listenAddrs, - port: port, - services: services, - name: name, - kr: kr, - lumeraClient: lumeraClient, - }, nil + if listenAddrs == "" { + return nil, fmt.Errorf("listen addresses cannot be empty") + } + return &Server{ + identity: identity, + listenAddrs: listenAddrs, + port: port, + services: services, + name: name, + kr: kr, + lumeraClient: lumeraClient, + }, nil } diff --git a/supernode/verifier/interface.go b/supernode/verifier/interface.go index d068dfce..d2668c9c 100644 --- a/supernode/verifier/interface.go +++ b/supernode/verifier/interface.go @@ -4,28 +4,32 @@ import "context" // ConfigVerifierService defines verification methods type ConfigVerifierService interface { - VerifyConfig(ctx context.Context) (*VerificationResult, error) + VerifyConfig(ctx context.Context) (*VerificationResult, error) } // ConfigError represents a config validation error or warning type ConfigError struct { - Field string - Expected string - Actual string - Message string + Field string + Expected string + Actual string + Message string } // VerificationResult holds the outcome of config verification type VerificationResult struct { - Valid bool - Errors []ConfigError - Warnings []ConfigError + Valid bool + Errors []ConfigError + Warnings []ConfigError } -func (r *VerificationResult) IsValid() bool { return r.Valid && len(r.Errors) == 0 } +func (r *VerificationResult) IsValid() bool { return r.Valid && len(r.Errors) == 0 } func (r *VerificationResult) HasWarnings() bool { return len(r.Warnings) > 0 } func (r *VerificationResult) Summary() string { - if !r.IsValid() { return "invalid: check errors" } - if r.HasWarnings() { return "valid with warnings" } - return "valid" + if !r.IsValid() { + return "invalid: check errors" + } + if r.HasWarnings() { + return "valid with warnings" + } + return "valid" } diff --git a/supernode/verifier/verifier.go b/supernode/verifier/verifier.go index 91d35a41..4875fe99 100644 --- a/supernode/verifier/verifier.go +++ b/supernode/verifier/verifier.go @@ -1,76 +1,129 @@ package verifier import ( - "context" - "fmt" - "net" + "context" + "fmt" + "net" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" ) -type ConfigVerifier struct { config *config.Config; lumeraClient lumera.Client; keyring keyring.Keyring } +type ConfigVerifier struct { + config *config.Config + lumeraClient lumera.Client + keyring keyring.Keyring +} -func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { return &ConfigVerifier{config: cfg, lumeraClient: client, keyring: kr} } +func NewConfigVerifier(cfg 
*config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { + return &ConfigVerifier{config: cfg, lumeraClient: client, keyring: kr} +} func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { - result := &VerificationResult{ Valid: true, Errors: []ConfigError{}, Warnings: []ConfigError{} } - logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{"identity": cv.config.SupernodeConfig.Identity, "key_name": cv.config.SupernodeConfig.KeyName, "p2p_port": cv.config.P2PConfig.Port}) - if err := cv.checkKeyExists(result); err != nil { return result, err } - if err := cv.checkIdentityMatches(result); err != nil { return result, err } - if !result.IsValid() { return result, nil } - supernodeInfo, err := cv.checkSupernodeExists(ctx, result); if err != nil { return result, err } - if supernodeInfo == nil { return result, nil } - cv.checkSupernodeState(result, supernodeInfo) - cv.checkPortsAvailable(result) - logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{"valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings)}) - return result, nil + result := &VerificationResult{Valid: true, Errors: []ConfigError{}, Warnings: []ConfigError{}} + logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{"identity": cv.config.SupernodeConfig.Identity, "key_name": cv.config.SupernodeConfig.KeyName, "p2p_port": cv.config.P2PConfig.Port}) + if err := cv.checkKeyExists(result); err != nil { + return result, err + } + if err := cv.checkIdentityMatches(result); err != nil { + return result, err + } + if !result.IsValid() { + return result, nil + } + supernodeInfo, err := cv.checkSupernodeExists(ctx, result) + if err != nil { + return result, err + } + if supernodeInfo == nil { + return result, nil + } + cv.checkSupernodeState(result, supernodeInfo) + cv.checkPortsAvailable(result) + logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{"valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings)}) + return result, nil } func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { - _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "key_name", Actual: cv.config.SupernodeConfig.KeyName, Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName)}) } - return nil + _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "key_name", Actual: cv.config.SupernodeConfig.KeyName, Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName)}) + } + return nil } func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { - keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName); if err != nil { return nil } - pubKey, err := keyInfo.GetPubKey(); if err != nil { return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) } - addr := sdk.AccAddress(pubKey.Address()) - if addr.String() != cv.config.SupernodeConfig.Identity { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "identity", Expected: addr.String(), Actual: cv.config.SupernodeConfig.Identity, Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), 
cv.config.SupernodeConfig.Identity)}) } - return nil + keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + return nil + } + pubKey, err := keyInfo.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) + } + addr := sdk.AccAddress(pubKey.Address()) + if addr.String() != cv.config.SupernodeConfig.Identity { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "identity", Expected: addr.String(), Actual: cv.config.SupernodeConfig.Identity, Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity)}) + } + return nil } func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { - sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) - if err != nil { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "registration", Actual: "not_registered", Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity)}); return nil, nil } - return sn, nil + sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "registration", Actual: "not_registered", Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity)}) + return nil, nil + } + return sn, nil } func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) - chainPort := supernodeInfo.P2PPort - if chainPort != "" && chainPort != configPort { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Expected: chainPort, Actual: configPort, Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort)}) } + configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) + chainPort := supernodeInfo.P2PPort + if chainPort != "" && chainPort != configPort { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Expected: chainPort, Actual: configPort, Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort)}) + } } func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "state", Expected: "SUPERNODE_STATE_ACTIVE", Actual: supernodeInfo.CurrentState, Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState)}) } + if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "state", Expected: "SUPERNODE_STATE_ACTIVE", Actual: supernodeInfo.CurrentState, Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState)}) + } } func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, 
int(cv.config.SupernodeConfig.Port)) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "supernode_port", Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port)}) } - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port)}) } - gatewayPort := int(cv.config.SupernodeConfig.GatewayPort); if gatewayPort == 0 { gatewayPort = 8002 } - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { result.Valid = false; result.Errors = append(result.Errors, ConfigError{Field: "gateway_port", Actual: fmt.Sprintf("%d", gatewayPort), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", gatewayPort)}) } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "supernode_port", Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port)}) + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port)}) + } + gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) + if gatewayPort == 0 { + gatewayPort = 8002 + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "gateway_port", Actual: fmt.Sprintf("%d", gatewayPort), Message: fmt.Sprintf("Port %d is already in use. 
Please stop the conflicting service or choose a different port", gatewayPort)}) + } } func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { - ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) - if err != nil { return false } - _ = ln.Close() - return true + ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return false + } + _ = ln.Close() + return true } diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index 478711d2..a856211b 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ -203,8 +203,8 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err) rqStores = append(rqStores, rqStore) - // Disable metrics in integration tests by default - service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) + // Disable metrics in integration tests by default + service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) // Start P2P service From c27161d0c457a9f3ad85d0f3309be19c516310ff Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 18:10:20 +0500 Subject: [PATCH 07/36] SDK changes --- pkg/codec/codec.go | 7 +++-- pkg/codec/codec_mock.go | 65 ---------------------------------------- pkg/codec/raptorq.go | 66 +++++++++++++++++++++++++++++++++++++---- sdk/action/client.go | 32 ++++++++------------ 4 files changed, 77 insertions(+), 93 deletions(-) delete mode 100644 pkg/codec/codec_mock.go diff --git a/pkg/codec/codec.go b/pkg/codec/codec.go index 39029569..e9a88a1f 100644 --- a/pkg/codec/codec.go +++ b/pkg/codec/codec.go @@ -1,5 +1,3 @@ -//go:generate mockgen -destination=codec_mock.go -package=codec -source=codec.go - package codec import ( @@ -19,7 +17,7 @@ type Layout struct { // Block is the schema for each entry in the “blocks” array. type Block struct { BlockID int `json:"block_id"` - EncoderParameters []int `json:"encoder_parameters"` + EncoderParameters []uint8 `json:"encoder_parameters"` OriginalOffset int64 `json:"original_offset"` Size int64 `json:"size"` Symbols []string `json:"symbols"` @@ -38,4 +36,7 @@ type Codec interface { // Encode a file Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) + // CreateMetadata builds the single-block layout metadata for the given file + // without generating RaptorQ symbols. + CreateMetadata(ctx context.Context, path string) (Layout, error) } diff --git a/pkg/codec/codec_mock.go b/pkg/codec/codec_mock.go deleted file mode 100644 index 09484cee..00000000 --- a/pkg/codec/codec_mock.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: codec.go - -// Package codec is a generated GoMock package. -package codec - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockCodec is a mock of Codec interface. -type MockCodec struct { - ctrl *gomock.Controller - recorder *MockCodecMockRecorder -} - -// MockCodecMockRecorder is the mock recorder for MockCodec. -type MockCodecMockRecorder struct { - mock *MockCodec -} - -// NewMockCodec creates a new mock instance. 
-func NewMockCodec(ctrl *gomock.Controller) *MockCodec { - mock := &MockCodec{ctrl: ctrl} - mock.recorder = &MockCodecMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodec) EXPECT() *MockCodecMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodec) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. -func (mr *MockCodecMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodec)(nil).Decode), ctx, req) -} - -// Encode mocks base method. -func (m *MockCodec) Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Encode", ctx, req) - ret0, _ := ret[0].(EncodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Encode indicates an expected call of Encode. -func (mr *MockCodecMockRecorder) Encode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Encode", reflect.TypeOf((*MockCodec)(nil).Encode), ctx, req) -} diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 541aac58..14bad1d9 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -57,7 +57,6 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons symbolsDir := filepath.Join(rq.symbolsBaseDir, req.TaskID) if err := os.MkdirAll(symbolsDir, 0o755); err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } logtrace.Debug(ctx, "RaptorQ processor encoding", fields) @@ -65,13 +64,9 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons resp, err := processor.EncodeFile(req.Path, symbolsDir, blockSize) if err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("raptorq encode: %w", err) } - /* we no longer need the temp file */ - // _ = os.Remove(tmpPath) - /* ---------- 2. read the layout JSON ---------- */ layoutData, err := os.ReadFile(resp.LayoutFilePath) logtrace.Debug(ctx, "RaptorQ processor layout file", logtrace.Fields{ @@ -94,3 +89,64 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons return encodeResp, nil } + +// CreateMetadata builds only the layout metadata for the given file without generating symbols. 
+func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, error) { + // Populate fields; include data-size by stat-ing the file to preserve existing log fields + fields := logtrace.Fields{ + logtrace.FieldMethod: "CreateMetadata", + logtrace.FieldModule: "rq", + "path": path, + } + if fi, err := os.Stat(path); err == nil { + fields["data-size"] = int(fi.Size()) + } + + processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) + if err != nil { + return Layout{}, fmt.Errorf("create RaptorQ processor: %w", err) + } + defer processor.Free() + logtrace.Debug(ctx, "RaptorQ processor created", fields) + + // Deterministic: force single block + blockSize := rqBlockSize + + // Prepare a temporary path for the generated layout file + base := rq.symbolsBaseDir + if base == "" { + base = os.TempDir() + } + tmpDir, err := os.MkdirTemp(base, "rq_meta_*") + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("mkdir temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + layoutPath := filepath.Join(tmpDir, "layout.json") + + // Use rq-go's metadata-only creation; no symbols are produced here. + resp, err := processor.CreateMetadata(path, layoutPath, blockSize) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("raptorq create metadata: %w", err) + } + + layoutData, err := os.ReadFile(resp.LayoutFilePath) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) + } + + var layout Layout + if err := json.Unmarshal(layoutData, &layout); err != nil { + return Layout{}, fmt.Errorf("unmarshal layout: %w", err) + } + + // Enforce single-block output; abort if multiple blocks are produced + if n := len(layout.Blocks); n != 1 { + return Layout{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) + } + + return layout, nil +} diff --git a/sdk/action/client.go b/sdk/action/client.go index 02f2bafc..356f59f2 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -239,8 +239,8 @@ func (c *ClientImpl) DownloadCascade(ctx context.Context, actionID, outputDir, s } // BuildCascadeMetadataFromFile produces Cascade metadata (including signatures) from a local file path. -// It uses a temporary RaptorQ workspace, enforces single-block layout via the codec, and cleans up after. -// BuildCascadeMetadataFromFile builds Cascade metadata, price and expiration from a file path. +// It generates only the single-block RaptorQ layout metadata (no symbols), signs it, +// and returns metadata, price and expiration. 
func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) { if filePath == "" { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("file path is empty") @@ -254,30 +254,22 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) } - // Create temp workspace for codec symbols; remove after - baseDir, err := os.MkdirTemp("", "rq_files_*") - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create temp dir: %w", err) - } - defer os.RemoveAll(baseDir) - - rq := codec.NewRaptorQCodec(baseDir) - // Use a simple task ID with epoch to avoid collisions - taskID := fmt.Sprintf("sdk-%d", time.Now().UnixNano()) - enc, err := rq.Encode(ctx, codec.EncodeRequest{TaskID: taskID, Path: filePath, DataSize: int(fi.Size())}) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq encode: %w", err) - } + // Build layout metadata only (no symbols). Supernodes will create symbols. + rq := codec.NewRaptorQCodec("") + layout, err := rq.CreateMetadata(ctx, filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) + } // Derive `max` from chain params, then create signatures and index IDs paramsResp, err := c.lumeraClient.GetActionParams(ctx) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action params: %w", err) } - // Use MaxDdAndFingerprints as the count for rq_ids generation (chain maps this to rq_ids_max for Cascade) + // Use MaxRaptorQSymbols as the count for rq_ids generation. var max uint32 - if paramsResp != nil && paramsResp.Params.MaxDdAndFingerprints > 0 { - max = uint32(paramsResp.Params.MaxDdAndFingerprints) + if paramsResp != nil && paramsResp.Params.MaxRaptorQSymbols > 0 { + max = uint32(paramsResp.Params.MaxRaptorQSymbols) } else { // Fallback to a sane default if params missing max = 50 @@ -285,7 +277,7 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // Pick a random initial counter in [1,100] rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) ic := uint32(rnd.Int64() + 1) // 1..100 - signatures, _, err := cascadekit.CreateSignaturesWithKeyring(enc.Metadata, c.keyring, c.config.Account.KeyName, ic, max) + signatures, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) } From 028179edb4937bcb40405eddb07a9acd249910c9 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 7 Oct 2025 20:53:34 +0500 Subject: [PATCH 08/36] sn-manager update --- Makefile | 37 +++++- pkg/cascadekit/serialize.go | 29 +++++ pkg/codec/decode.go | 34 +++++- pkg/logtrace/log.go | 2 +- sdk/action/client.go | 12 +- sn-manager/cmd/check.go | 50 ++++---- sn-manager/cmd/start.go | 31 ++++- sn-manager/internal/config/config.go | 17 ++- sn-manager/internal/manager/manager.go | 30 ++--- sn-manager/internal/updater/updater.go | 126 ++++++++++++++++++--- supernode/cascade/helper.go | 8 +- supernode/cmd/start.go | 42 +++---- supernode/config.yml | 2 + supernode/config/config.go | 18 ++- supernode/transport/gateway/swagger.json | 33 ++++++ supernode/transport/grpc/status/handler.go | 29 +++++ tests/system/go.mod | 1 - tests/system/go.sum | 4 - 18 files changed, 382 insertions(+), 123 deletions(-) 
create mode 100644 pkg/cascadekit/serialize.go

diff --git a/Makefile b/Makefile
index fd9dfebf..2d300b9b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,8 @@
-.PHONY: build build-release build-sncli build-sn-manager
+.PHONY: build build-sncli build-sn-manager
 .PHONY: install-lumera setup-supernodes system-test-setup install-deps
 .PHONY: gen-cascade gen-supernode
 .PHONY: test-e2e test-unit test-integration test-system
+.PHONY: release
 
 # Build variables
 VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
@@ -22,11 +23,8 @@ SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \
 
 build:
 	@mkdir -p release
-	CGO_ENABLED=1 \
-	GOOS=linux \
-	GOARCH=amd64 \
-	echo "Building supernode..."
-	go build \
+	@echo "Building supernode..."
+	CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build \
 		-trimpath \
 		-ldflags="-s -w $(LDFLAGS)" \
 		-o release/supernode-linux-amd64 \
@@ -148,3 +146,30 @@ test-cascade:
 
 test-sn-manager:
	@echo "Running sn-manager e2e tests..."
	@cd tests/system && go test -tags=system_test -v -run '^TestSNManager' .
+
+
+
+# Release target: pushes the current branch and an auto-incremented tag to the 'upstream' remote. For testing only (including test releases); set up a remote named 'upstream' or adjust this target before use.
+release:
+	@echo "Getting current branch..."
+	$(eval CURRENT_BRANCH := $(shell git branch --show-current))
+	@echo "Current branch: $(CURRENT_BRANCH)"
+
+	@echo "Getting latest tag..."
+	$(eval LATEST_TAG := $(shell git tag -l "v*" | sort -V | tail -n1))
+	$(eval NEXT_TAG := $(shell \
+		if [ -z "$(LATEST_TAG)" ]; then \
+			echo "v2.5.0"; \
+		else \
+			echo "$(LATEST_TAG)" | sed 's/^v//' | awk -F. '{print "v" $$1 "." $$2 "." $$3+1}'; \
+		fi))
+	@echo "Next tag: $(NEXT_TAG)"
+
+	@echo "Pushing branch to upstream..."
+	git push upstream $(CURRENT_BRANCH) -f
+
+	@echo "Creating and pushing tag $(NEXT_TAG)..."
+	git tag $(NEXT_TAG)
+	git push upstream $(NEXT_TAG)
+
+	@echo "Release complete: $(NEXT_TAG) pushed to upstream"
diff --git a/pkg/cascadekit/serialize.go b/pkg/cascadekit/serialize.go
new file mode 100644
index 00000000..21cef3d9
--- /dev/null
+++ b/pkg/cascadekit/serialize.go
@@ -0,0 +1,29 @@
+package cascadekit
+
+import (
+	"encoding/base64"
+	"encoding/json"
+
+	"github.com/LumeraProtocol/supernode/v2/pkg/codec"
+	"github.com/LumeraProtocol/supernode/v2/pkg/errors"
+)
+
+// LayoutJSON marshals a codec.Layout using the standard library encoder.
+func LayoutJSON(layout codec.Layout) ([]byte, error) {
+	b, err := json.Marshal(layout)
+	if err != nil {
+		return nil, errors.Errorf("marshal layout: %w", err)
+	}
+	return b, nil
+}
+
+// LayoutB64 returns base64(JSON(layout)) bytes using encoding/json for deterministic output.
+func LayoutB64(layout codec.Layout) ([]byte, error) {
+	raw, err := LayoutJSON(layout)
+	if err != nil {
+		return nil, err
+	}
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
+	base64.StdEncoding.Encode(out, raw)
+	return out, nil
+}
diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go
index 348894e4..4d8ae5f7 100644
--- a/pkg/codec/decode.go
+++ b/pkg/codec/decode.go
@@ -173,9 +173,39 @@ func (rq *raptorQ) DecodeFromPrepared(
 	}
 	defer processor.Free()
 
-	// Write layout.json (idempotent)
+	// Write layout.json (idempotent). Important: encoder_parameters must be a JSON array, not a base64 string.
+	// Go's encoding/json marshals []byte (aka []uint8) as base64 strings, which rq-go rejects.
+	// Use a wire struct that maps encoder_parameters to []int to produce a numeric array.
+ type blockOnDisk struct { + BlockID int `json:"block_id"` + EncoderParameters []int `json:"encoder_parameters"` + OriginalOffset int64 `json:"original_offset"` + Size int64 `json:"size"` + Symbols []string `json:"symbols"` + Hash string `json:"hash"` + } + type layoutOnDisk struct { + Blocks []blockOnDisk `json:"blocks"` + } + var lod layoutOnDisk + lod.Blocks = make([]blockOnDisk, len(layout.Blocks)) + for i, b := range layout.Blocks { + // convert []uint8 (aka []byte) to []int so JSON encodes as numeric array + ep := make([]int, len(b.EncoderParameters)) + for j := range b.EncoderParameters { + ep[j] = int(b.EncoderParameters[j]) + } + lod.Blocks[i] = blockOnDisk{ + BlockID: b.BlockID, + EncoderParameters: ep, + OriginalOffset: b.OriginalOffset, + Size: b.Size, + Symbols: b.Symbols, + Hash: b.Hash, + } + } layoutPath := filepath.Join(ws.SymbolsDir, "layout.json") - layoutBytes, err := json.Marshal(layout) + layoutBytes, err := json.Marshal(lod) if err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err) diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 469b32e8..6e27b020 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -62,7 +62,7 @@ func Setup(serviceName string) { // getLogLevel returns the log level from environment variable LOG_LEVEL func getLogLevel() zapcore.Level { - levelStr := "info" + levelStr := strings.ToLower(os.Getenv("LOG_LEVEL")) switch levelStr { case "debug": return zapcore.DebugLevel diff --git a/sdk/action/client.go b/sdk/action/client.go index 356f59f2..82ffa052 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -254,12 +254,12 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) } - // Build layout metadata only (no symbols). Supernodes will create symbols. - rq := codec.NewRaptorQCodec("") - layout, err := rq.CreateMetadata(ctx, filePath) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) - } + // Build layout metadata only (no symbols). Supernodes will create symbols. 
+ rq := codec.NewRaptorQCodec("") + layout, err := rq.CreateMetadata(ctx, filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) + } // Derive `max` from chain params, then create signatures and index IDs paramsResp, err := c.lumeraClient.GetActionParams(ctx) diff --git a/sn-manager/cmd/check.go b/sn-manager/cmd/check.go index df20b2a5..2e6e971b 100644 --- a/sn-manager/cmd/check.go +++ b/sn-manager/cmd/check.go @@ -1,14 +1,14 @@ package cmd import ( - "fmt" - "strings" + "fmt" + "strings" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" - "github.com/spf13/cobra" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" + "github.com/spf13/cobra" ) var checkCmd = &cobra.Command{ @@ -32,8 +32,8 @@ func runCheck(cmd *cobra.Command, args []string) error { fmt.Println("Checking for updates...") - // Create GitHub client - client := github.NewClient(config.GitHubRepo) + // Create GitHub client + client := github.NewClient(config.GitHubRepo) // Get latest stable release release, err := client.GetLatestStableRelease() @@ -41,26 +41,26 @@ func runCheck(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to check for stable updates: %w", err) } - fmt.Printf("\nLatest release: %s\n", release.TagName) - fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) - // Report manager version and if it would update under the same policy - mv := strings.TrimSpace(appVersion) - if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { - managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 - fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) - } else { - fmt.Printf("Manager version: %s\n", appVersion) - } + fmt.Printf("\nLatest release: %s\n", release.TagName) + fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) + // Report manager version and if it would update under the same policy + mv := strings.TrimSpace(appVersion) + if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { + managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 + fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) + } else { + fmt.Printf("Manager version: %s\n", appVersion) + } - // Compare versions - cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) + // Compare versions + cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) if cmp < 0 { // Use the same logic as auto-updater to determine update eligibility - managerHome := config.GetManagerHome() - autoUpdater := updater.New(managerHome, cfg, appVersion) - wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) - + managerHome := config.GetManagerHome() + autoUpdater := updater.New(managerHome, cfg, appVersion, nil) + wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) + if wouldAutoUpdate { fmt.Printf("\n✓ Update available: %s → %s\n", cfg.Updates.CurrentVersion, 
release.TagName) fmt.Printf("Published: %s\n", release.PublishedAt.Format("2006-01-02 15:04:05")) diff --git a/sn-manager/cmd/start.go b/sn-manager/cmd/start.go index de03c6dd..f98ff737 100644 --- a/sn-manager/cmd/start.go +++ b/sn-manager/cmd/start.go @@ -121,12 +121,27 @@ func runStart(cmd *cobra.Command, args []string) error { } } + // orchestrator to gracefully stop SuperNode and exit manager with code 3 + gracefulManagerRestart := func() { + // Write stop marker so monitor won't auto-restart SuperNode + stopMarkerPath := filepath.Join(home, stopMarkerFile) + _ = os.WriteFile(stopMarkerPath, []byte("manager-update"), 0644) + + // Attempt graceful stop of SuperNode if running + if mgr.IsRunning() { + if err := mgr.Stop(); err != nil { + log.Printf("Failed to stop supernode: %v", err) + } + } + os.Exit(3) + } + // Mandatory version sync on startup: ensure both sn-manager and SuperNode // are at the latest stable release. This bypasses regular updater checks // (gateway idleness, same-major policy) to guarantee a consistent baseline. - // Runs once before monitoring begins. + // Runs once before monitoring begins. If manager updated, restart now. func() { - u := updater.New(home, cfg, appVersion) + u := updater.New(home, cfg, appVersion, gracefulManagerRestart) // Do not block startup on failures; best-effort sync defer func() { recover() }() u.ForceSyncToLatest(context.Background()) @@ -135,7 +150,7 @@ func runStart(cmd *cobra.Command, args []string) error { // Start auto-updater if enabled var autoUpdater *updater.AutoUpdater if cfg.Updates.AutoUpgrade { - autoUpdater = updater.New(home, cfg, appVersion) + autoUpdater = updater.New(home, cfg, appVersion, gracefulManagerRestart) autoUpdater.Start(ctx) } @@ -171,7 +186,15 @@ func runStart(cmd *cobra.Command, args []string) error { return nil case err := <-monitorDone: - // Monitor exited unexpectedly + // Monitor exited; ensure SuperNode is stopped as manager exits + if autoUpdater != nil { + autoUpdater.Stop() + } + if mgr.IsRunning() { + if stopErr := mgr.Stop(); stopErr != nil { + log.Printf("Failed to stop supernode: %v", stopErr) + } + } if err != nil { return fmt.Errorf("monitor error: %w", err) } diff --git a/sn-manager/internal/config/config.go b/sn-manager/internal/config/config.go index 87568580..f41a7f89 100644 --- a/sn-manager/internal/config/config.go +++ b/sn-manager/internal/config/config.go @@ -12,10 +12,19 @@ import ( const ( // ManagerHomeDir is the constant home directory for sn-manager ManagerHomeDir = ".sn-manager" - // GitHubRepo is the constant GitHub repository for supernode - GitHubRepo = "LumeraProtocol/supernode" + // defaultGitHubRepo is the default GitHub repository for supernode + defaultGitHubRepo = "LumeraProtocol/supernode" ) +// GitHubRepo is the GitHub repository for supernode and can be overridden via +// the SNM_GITHUB_REPO environment variable. +var GitHubRepo = func() string { + if v := os.Getenv("SNM_GITHUB_REPO"); v != "" { + return v + } + return defaultGitHubRepo +}() + // Config represents the sn-manager configuration type Config struct { Updates UpdateConfig `yaml:"updates"` @@ -81,7 +90,3 @@ func Save(cfg *Config, path string) error { return nil } - -// Validate checks if the configuration is valid -// Validate is kept for compatibility; no-op since interval was removed. 
-func (c *Config) Validate() error { return nil } diff --git a/sn-manager/internal/manager/manager.go b/sn-manager/internal/manager/manager.go index fd176121..06dacdb4 100644 --- a/sn-manager/internal/manager/manager.go +++ b/sn-manager/internal/manager/manager.go @@ -33,11 +33,6 @@ func New(homeDir string) (*Manager, error) { return nil, fmt.Errorf("failed to load config: %w", err) } - // Validate configuration - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &Manager{ config: cfg, homeDir: homeDir, @@ -175,9 +170,9 @@ func (m *Manager) cleanup() { const ( DefaultShutdownTimeout = 30 * time.Second ProcessCheckInterval = 5 * time.Second - CrashBackoffDelay = 2 * time.Second - StopMarkerFile = ".stop_requested" - RestartMarkerFile = ".needs_restart" + CrashBackoffDelay = 2 * time.Second + StopMarkerFile = ".stop_requested" + RestartMarkerFile = ".needs_restart" ) // Monitor continuously supervises the SuperNode process @@ -190,7 +185,7 @@ func (m *Manager) Monitor(ctx context.Context) error { // Channel to monitor process exits processExitCh := make(chan error, 1) - + // Function to arm the process wait goroutine armProcessWait := func() { processExitCh = make(chan error, 1) @@ -262,7 +257,7 @@ func (m *Manager) Monitor(ctx context.Context) error { case <-ticker.C: // Periodic check for various conditions - + // 1. Check if stop marker was removed and we should start if !m.IsRunning() { if _, err := os.Stat(stopMarkerPath); os.IsNotExist(err) { @@ -281,16 +276,16 @@ func (m *Manager) Monitor(ctx context.Context) error { if _, err := os.Stat(restartMarkerPath); err == nil { if m.IsRunning() { log.Println("Binary update detected, restarting SuperNode...") - + // Remove the restart marker if err := os.Remove(restartMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove restart marker: %v", err) } - + // Create temporary stop marker for clean restart tmpStopMarker := []byte("update") os.WriteFile(stopMarkerPath, tmpStopMarker, 0644) - + // Stop current process if err := m.Stop(); err != nil { log.Printf("Failed to stop for update: %v", err) @@ -299,15 +294,15 @@ func (m *Manager) Monitor(ctx context.Context) error { } continue } - + // Brief pause time.Sleep(CrashBackoffDelay) - + // Remove temporary stop marker if err := os.Remove(stopMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove stop marker: %v", err) } - + // Start with new binary log.Println("Starting with updated binary...") if err := m.Start(ctx); err != nil { @@ -325,7 +320,7 @@ func (m *Manager) Monitor(ctx context.Context) error { m.mu.RLock() proc := m.process m.mu.RUnlock() - + if proc != nil { if err := proc.Signal(syscall.Signal(0)); err != nil { // Process is dead but not cleaned up @@ -344,4 +339,3 @@ func (m *Manager) Monitor(ctx context.Context) error { func (m *Manager) GetConfig() *config.Config { return m.config } - diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index b2e01e2d..548af07b 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -13,15 +13,18 @@ import ( "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" ) // Global updater timing constants const ( + // gatewayTimeout bounds 
the local gateway status probe + // gatewayTimeout = 15 * time.Second // updateCheckInterval is how often the periodic updater runs - updateCheckInterval = 10 * time.Minute + updateCheckInterval = 5 * time.Minute // forceUpdateAfter is the age threshold after a release is published - // beyond which updates are applied regardless of normal gates (policy only) - forceUpdateAfter = 30 * time.Minute + // beyond which updates are applied regardless of normal gates (idle, policy) + forceUpdateAfter = 5 * time.Minute ) type AutoUpdater struct { @@ -29,21 +32,32 @@ type AutoUpdater struct { homeDir string githubClient github.GithubClient versionMgr *version.Manager + gatewayURL string ticker *time.Ticker stopCh chan struct{} managerVersion string + // Gateway error backoff state + gwErrCount int + gwErrWindowStart time.Time + // Optional hook to handle manager update (restart) orchestration + onManagerUpdate func() } // Use protobuf JSON decoding for gateway responses (int64s encoded as strings) -func New(homeDir string, cfg *config.Config, managerVersion string) *AutoUpdater { +func New(homeDir string, cfg *config.Config, managerVersion string, onManagerUpdate func()) *AutoUpdater { + // Use the correct gateway endpoint with imported constants + gatewayURL := fmt.Sprintf("http://localhost:%d/api/v1/status", gateway.DefaultGatewayPort) + return &AutoUpdater{ - config: cfg, - homeDir: homeDir, - githubClient: github.NewClient(config.GitHubRepo), - versionMgr: version.NewManager(homeDir), - stopCh: make(chan struct{}), - managerVersion: managerVersion, + config: cfg, + homeDir: homeDir, + githubClient: github.NewClient(config.GitHubRepo), + versionMgr: version.NewManager(homeDir), + gatewayURL: gatewayURL, + stopCh: make(chan struct{}), + managerVersion: managerVersion, + onManagerUpdate: onManagerUpdate, } } @@ -121,6 +135,36 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { // isGatewayIdle returns (idle, isError). When isError is true, // the gateway could not be reliably checked (network/error/invalid). // When isError is false and idle is false, the gateway is busy. +func (u *AutoUpdater) isGatewayIdle() (bool, bool) { + // client := &http.Client{Timeout: gatewayTimeout} + + // resp, err := client.Get(u.gatewayURL) + // if err != nil { + // log.Printf("Failed to check gateway status: %v", err) + // // Error contacting gateway + // return false, true + // } + // defer resp.Body.Close() + + // if resp.StatusCode != http.StatusOK { + // log.Printf("Gateway returned status %d, not safe to update", resp.StatusCode) + // return false, true + // } + + // var status pb.StatusResponse + // body, err := io.ReadAll(resp.Body) + // if err != nil { + // log.Printf("Failed to read gateway response: %v", err) + // return false, true + // } + // if err := protojson.Unmarshal(body, &status); err != nil { + // log.Printf("Failed to decode gateway response: %v", err) + // return false, true + // } + + // // TEMP: tasks are not available in the new gateway endpoint; skip busy-check + return true, false +} // checkAndUpdateCombined performs a single release check and, if needed, // downloads the release tarball once to update sn-manager and SuperNode. @@ -183,6 +227,20 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { return } + // Gate all updates (manager + SuperNode) on gateway idleness + // to avoid disrupting traffic during a self-update. 
+ if !force { + if idle, isErr := u.isGatewayIdle(); !idle { + if isErr { + // Track errors and possibly request a clean SuperNode restart + u.handleGatewayError() + } else { + log.Println("Gateway busy, deferring updates") + } + return + } + } + // Download the combined release tarball once tarURL, err := u.githubClient.GetReleaseTarballURL(latest) if err != nil { @@ -285,13 +343,55 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { // If manager updated, restart service after completing all work if managerUpdated { log.Printf("Self-update applied, restarting service...") - go func() { - time.Sleep(500 * time.Millisecond) + if u.onManagerUpdate != nil { + u.onManagerUpdate() + } else { + // Fallback: immediate process restart signal os.Exit(3) - }() + } } } // handleGatewayError increments an error counter in a rolling 5-minute window // and when the threshold is reached, requests a clean SuperNode restart by // writing the standard restart marker consumed by the manager monitor. +func (u *AutoUpdater) handleGatewayError() { + const ( + window = 5 * time.Minute + retries = 3 // attempts within window before restart + ) + now := time.Now() + if u.gwErrWindowStart.IsZero() { + u.gwErrWindowStart = now + u.gwErrCount = 1 + log.Printf("Gateway check error (1/%d); starting 5m observation window", retries) + return + } + + elapsed := now.Sub(u.gwErrWindowStart) + if elapsed >= window { + // Window elapsed; decide based on accumulated errors + if u.gwErrCount >= retries { + marker := filepath.Join(u.homeDir, ".needs_restart") + if err := os.WriteFile(marker, []byte("gateway-error-recover"), 0644); err != nil { + log.Printf("Failed to write restart marker after gateway errors: %v", err) + } else { + log.Printf("Gateway errors persisted (%d/%d) over >=5m; requesting SuperNode restart to recover gateway", u.gwErrCount, retries) + } + } + // Start a new window beginning now, with this error as the first hit + u.gwErrWindowStart = now + u.gwErrCount = 1 + return + } + + // Still within the window; increment and possibly announce threshold reached + u.gwErrCount++ + if u.gwErrCount < retries { + log.Printf("Gateway check error (%d/%d) within 5m; will retry", u.gwErrCount, retries) + return + } + // Threshold reached but do not restart until full window elapses + remaining := window - elapsed + log.Printf("Gateway error threshold reached; waiting %s before requesting SuperNode restart", remaining.Truncate(time.Second)) +} diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index 75315a09..ea444729 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -12,11 +12,10 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" sdk "github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -79,11 +78,10 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) } - layoutJSON, err := json.Marshal(encodedMeta) + layoutB64, err := cascadekit.LayoutB64(encodedMeta) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) + return codec.Layout{}, "", task.wrapErr(ctx, "failed to build layout base64", err, f) } - layoutB64 := utils.B64Encode(layoutJSON) if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index e1af0616..5b173034 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -3,8 +3,6 @@ package cmd import ( "context" "fmt" - "net/http" - _ "net/http/pprof" "os" "os/signal" "path/filepath" @@ -150,25 +148,6 @@ The supernode will connect to the Lumera network and begin participating in the return fmt.Errorf("failed to create gateway server: %w", err) } - // Start profiling server on testnet only - isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") - - if isTestnet && os.Getenv("INTEGRATION_TEST") != "true" { - profilingAddr := "0.0.0.0:8082" - - logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{ - "address": profilingAddr, - "chain_id": appConfig.LumeraClientConfig.ChainID, - "is_testnet": isTestnet, - }) - - go func() { - if err := http.ListenAndServe(profilingAddr, nil); err != nil { - logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()}) - } - }() - } - // Start the services using the standard runner and capture exit servicesErr := make(chan error, 1) go func() { servicesErr <- RunServices(ctx, grpcServer, cService, p2pService, gatewayServer) }() @@ -176,6 +155,7 @@ The supernode will connect to the Lumera network and begin participating in the // Set up signal handling for graceful shutdown sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigCh) // Wait for either a termination signal or service exit var triggeredBySignal bool @@ -195,20 +175,24 @@ The supernode will connect to the Lumera network and begin participating in the // Cancel context to signal all services cancel() - // Stop HTTP gateway and gRPC servers gracefully + // Stop HTTP gateway and gRPC servers without blocking shutdown shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) defer shutdownCancel() - if err := gatewayServer.Stop(shutdownCtx); err != nil { - logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) - } + go func() { + if err := gatewayServer.Stop(shutdownCtx); err != nil { + logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) + } + }() grpcServer.Close() - // Close Lumera client (preserve original log messages) + // Close Lumera client without blocking shutdown logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) - if err := lumeraClient.Close(); err != nil { - logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) - } + go func() { + if err := lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) + } + }() // If we triggered shutdown by signal, wait for services to drain if triggeredBySignal { diff --git a/supernode/config.yml 
b/supernode/config.yml index 3bbf8b7e..35d888a3 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -2,7 +2,9 @@ supernode: key_name: "mykey" # Account name for the supernode in keyring identity: "lumera1ccmw5plzuldntum2rz6kq6uq346vtrhrvwfzsa" # Identity of the supernode, lumera address + # You can set either 'host' (preferred) or 'ip_address' (legacy alias). host: "0.0.0.0" + # ip_address: "0.0.0.0" port: 4444 # Keyring Configuration diff --git a/supernode/config/config.go b/supernode/config/config.go index e3910ac2..d655391c 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -5,15 +5,18 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "gopkg.in/yaml.v3" ) type SupernodeConfig struct { - KeyName string `yaml:"key_name"` - Identity string `yaml:"identity"` - Host string `yaml:"host"` + KeyName string `yaml:"key_name"` + Identity string `yaml:"identity"` + Host string `yaml:"host"` + // IPAddress is an accepted alias for Host to support older configs + IPAddress string `yaml:"ip_address,omitempty"` Port uint16 `yaml:"port"` GatewayPort uint16 `yaml:"gateway_port,omitempty"` } @@ -127,6 +130,15 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { return nil, fmt.Errorf("error parsing config file: %w", err) } + // Support both 'host' and legacy 'ip_address' fields. If 'host' is empty + // and 'ip_address' is provided, use it as the host value. + if strings.TrimSpace(config.SupernodeConfig.Host) == "" && strings.TrimSpace(config.SupernodeConfig.IPAddress) != "" { + config.SupernodeConfig.Host = strings.TrimSpace(config.SupernodeConfig.IPAddress) + logtrace.Debug(ctx, "Using ip_address as host", logtrace.Fields{ + "ip_address": config.SupernodeConfig.IPAddress, + }) + } + // Set the base directory config.BaseDir = baseDir diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json index e6857ae0..0a40a447 100644 --- a/supernode/transport/gateway/swagger.json +++ b/supernode/transport/gateway/swagger.json @@ -30,6 +30,22 @@ } } } + }, + "/api/v1/services": { + "get": { + "summary": "List services", + "description": "Returns available gRPC services and their methods/streams", + "responses": { + "200": { + "description": "Services response", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/ListServicesResponse" } + } + } + } + } + } } }, "components": { @@ -40,6 +56,23 @@ "version": { "type": "string" }, "uptimeSeconds": { "type": "integer" } } + }, + "ListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { "$ref": "#/components/schemas/ServiceInfo" } + }, + "count": { "type": "integer" } + } + }, + "ServiceInfo": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "methods": { "type": "array", "items": { "type": "string" } } + } } } } diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go index 7c9c22bf..4e120279 100644 --- a/supernode/transport/grpc/status/handler.go +++ b/supernode/transport/grpc/status/handler.go @@ -4,6 +4,7 @@ import ( "context" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" ) @@ -22,3 +23,31 @@ func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *Supern func (s *SupernodeServer) GetStatus(ctx 
context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) } + +// ListServices implements SupernodeService.ListServices +func (s *SupernodeServer) ListServices(ctx context.Context, _ *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { + // Describe available services and methods/streams exposed by this node + var services []*pb.ServiceInfo + + // SupernodeService methods + var supernodeMethods []string + for _, m := range pb.SupernodeService_ServiceDesc.Methods { + supernodeMethods = append(supernodeMethods, m.MethodName) + } + services = append(services, &pb.ServiceInfo{ + Name: pb.SupernodeService_ServiceDesc.ServiceName, + Methods: supernodeMethods, + }) + + // CascadeService streams (surface stream names as methods for discovery) + var cascadeMethods []string + for _, st := range pbcascade.CascadeService_ServiceDesc.Streams { + cascadeMethods = append(cascadeMethods, st.StreamName) + } + services = append(services, &pb.ServiceInfo{ + Name: pbcascade.CascadeService_ServiceDesc.ServiceName, + Methods: cascadeMethods, + }) + + return &pb.ListServicesResponse{Services: services, Count: int32(len(services))}, nil +} diff --git a/tests/system/go.mod b/tests/system/go.mod index 8e1d8840..99bb1df9 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -95,7 +95,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index d00c5807..5737b819 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -806,7 +806,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= @@ -892,7 +891,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -938,7 +936,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1031,7 +1028,6 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From d11dda70c3f22394e064854f357dc222417c6bca Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Wed, 8 Oct 2025 15:42:51 +0500 Subject: [PATCH 09/36] remove unused vars --- pkg/codec/codec_default_test.go | 39 +++++++++++++++++++-- supernode/cascade/adaptors/p2p.go | 3 -- supernode/cascade/adaptors/rq.go | 6 ++-- supernode/cascade/config.go | 3 +- supernode/cascade/download.go | 4 +-- supernode/cascade/helper.go | 6 ++-- supernode/cascade/interfaces.go | 4 +-- supernode/cascade/register.go | 4 +-- supernode/cascade/task.go | 20 +---------- supernode/transport/grpc/cascade/handler.go | 1 + 10 files changed, 51 insertions(+), 39 deletions(-) diff --git a/pkg/codec/codec_default_test.go b/pkg/codec/codec_default_test.go index 537a8d7d..cdd54aee 100644 --- a/pkg/codec/codec_default_test.go +++ b/pkg/codec/codec_default_test.go @@ -10,9 +10,9 @@ import ( // Constants: set InputPath and TaskID. BaseDir is the current directory. const ( - BaseDir = "" - InputPath = "" // set to an existing file path before running - TaskID = "rq-dirA" // both tests use the same directory + BaseDir = "/home/enxsys/Documents/Github/LumeraProtocol/supernode/release" + InputPath = "/home/enxsys/Documents/Github/LumeraProtocol/supernode/tests/system/900.zip" // set to an existing file path before running + TaskID = "rq-dirA" // both tests use the same directory ) // TestEncode_ToDirA encodes InputPath into BaseDir/TaskID using default settings. @@ -120,3 +120,36 @@ func itoa(i int) string { } return string(b[n:]) } + +// TestCreateMetadata_SaveToFile generates layout metadata only and writes it to a file. +func TestCreateMetadata_SaveToFile(t *testing.T) { + if InputPath == "" { + t.Skip("set InputPath constant to a file path to run this test") + } + + ctx := context.TODO() + c := NewRaptorQCodec(BaseDir) + + // Create metadata using the codec and write it next to the input file. 
+ layout, err := c.CreateMetadata(ctx, InputPath) + if err != nil { + t.Fatalf("create metadata: %v", err) + } + data, err := json.MarshalIndent(layout, "", " ") + if err != nil { + t.Fatalf("marshal metadata: %v", err) + } + outPath := "/home/enxsys/Documents/Github/LumeraProtocol/supernode/pkg/codec" + ".layout.json" + if err := os.WriteFile(outPath, data, 0o644); err != nil { + t.Fatalf("write output: %v", err) + } + + fi, err := os.Stat(outPath) + if err != nil { + t.Fatalf("stat output: %v", err) + } + if fi.Size() == 0 { + t.Fatalf("output file is empty: %s", outPath) + } + t.Logf("metadata saved to: %s (%d bytes)", outPath, fi.Size()) +} diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/cascade/adaptors/p2p.go index 857df09a..1a9e14b0 100644 --- a/supernode/cascade/adaptors/p2p.go +++ b/supernode/cascade/adaptors/p2p.go @@ -51,9 +51,6 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, if err != nil { return fmt.Errorf("error storing artefacts: %w", err) } - _ = firstPassSymbols - _ = totalSymbols - _ = start remaining := 0 if req.SymbolsDir != "" { if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { diff --git a/supernode/cascade/adaptors/rq.go b/supernode/cascade/adaptors/rq.go index a6fd4250..5586edf8 100644 --- a/supernode/cascade/adaptors/rq.go +++ b/supernode/cascade/adaptors/rq.go @@ -8,7 +8,7 @@ import ( // CodecService wraps codec operations used by cascade type CodecService interface { - EncodeInput(ctx context.Context, actionID string, path string, dataSize int) (EncodeResult, error) + EncodeInput(ctx context.Context, actionID string, path string) (EncodeResult, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) } @@ -32,8 +32,8 @@ type codecImpl struct{ codec codec.Codec } func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } -func (c *codecImpl) EncodeInput(ctx context.Context, actionID, path string, dataSize int) (EncodeResult, error) { - res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path, DataSize: dataSize}) +func (c *codecImpl) EncodeInput(ctx context.Context, actionID, path string) (EncodeResult, error) { + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path}) if err != nil { return EncodeResult{}, err } diff --git a/supernode/cascade/config.go b/supernode/cascade/config.go index f6d9b5f3..bb32ca13 100644 --- a/supernode/cascade/config.go +++ b/supernode/cascade/config.go @@ -5,6 +5,5 @@ type Config struct { // SupernodeAccountAddress is the on-chain account address of this supernode. 
SupernodeAccountAddress string `mapstructure:"-" json:"-"` - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` + RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` } diff --git a/supernode/cascade/download.go b/supernode/cascade/download.go index 90a3eab9..ec956e01 100644 --- a/supernode/cascade/download.go +++ b/supernode/cascade/download.go @@ -219,8 +219,8 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout(ctx context.Context, } decodeMS := time.Since(decodeStart).Milliseconds() logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) - _ = retrieveMS - _ = decodeMS + // Emit timing metrics for network retrieval and decode phases + logtrace.Debug(ctx, "download: timing", logtrace.Fields{"action_id": actionID, "retrieve_ms": retrieveMS, "decode_ms": decodeMS}) // Verify reconstructed file hash matches action metadata fileHash, herr := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index ea444729..1c3b2844 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -49,8 +49,8 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b return nil } -func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { - resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) +func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, f logtrace.Fields) (*adaptors.EncodeResult, error) { + resp, err := task.RQ.EncodeInput(ctx, actionID, path) if err != nil { return nil, task.wrapErr(ctx, "failed to encode data", err, f) } @@ -89,7 +89,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
return encodedMeta, indexFile.LayoutSignature, nil } -func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { +func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, sig string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) if err != nil { return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate layout files", err, f) diff --git a/supernode/cascade/interfaces.go b/supernode/cascade/interfaces.go index e782bc23..5a4d0d4e 100644 --- a/supernode/cascade/interfaces.go +++ b/supernode/cascade/interfaces.go @@ -6,7 +6,7 @@ import ( // CascadeServiceFactory defines an interface to create cascade tasks // -//go:generate mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go + type CascadeServiceFactory interface { NewCascadeRegistrationTask() CascadeTask } @@ -15,5 +15,5 @@ type CascadeServiceFactory interface { type CascadeTask interface { Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error - CleanupDownload(ctx context.Context, actionID string) error + CleanupDownload(ctx context.Context, tmpDir string) error } diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go index a54649be..65565d21 100644 --- a/supernode/cascade/register.go +++ b/supernode/cascade/register.go @@ -85,7 +85,7 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) + encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) if err != nil { return err } @@ -100,7 +100,7 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: signature verified", fields) task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) + rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, encResp.Metadata, fields) if err != nil { return err } diff --git a/supernode/cascade/task.go b/supernode/cascade/task.go index 6d466e57..71725d20 100644 --- a/supernode/cascade/task.go +++ b/supernode/cascade/task.go @@ -1,32 +1,14 @@ package cascade -import ( - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" -) - // CascadeRegistrationTask is the task for cascade registration type CascadeRegistrationTask struct { *CascadeService - Asset *files.File - dataHash string - creatorSignature []byte - taskID string + taskID string } -const ( - logPrefix = "cascade" -) - -// Compile-time check to ensure CascadeRegistrationTask implements CascadeTask interface var _ CascadeTask = (*CascadeRegistrationTask)(nil) -func (task *CascadeRegistrationTask) removeArtifacts() { - if task.Asset != nil { - _ = task.Asset.Remove() - } -} - // NewCascadeRegistrationTask returns a new Task instance func NewCascadeRegistrationTask(service 
*CascadeService) *CascadeRegistrationTask { return &CascadeRegistrationTask{CascadeService: service} diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index 9d75e812..2a361a0f 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -227,6 +227,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS } // Notify client that server is ready to stream the file + logtrace.Debug(ctx, "download: serve ready", logtrace.Fields{"event_type": cascadeService.SupernodeEventTypeServeReady, logtrace.FieldActionID: req.GetActionId()}) if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { return fmt.Errorf("send serve-ready: %w", err) } From 6b073a428795558d42c63bd616ca30c38f2ee71a Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Wed, 8 Oct 2025 15:52:11 +0500 Subject: [PATCH 10/36] batch size --- p2p/kademlia/network.go | 4 ++-- supernode/cascade/adaptors/p2p.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index ef542ee5..bae7fd87 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -28,10 +28,10 @@ import ( const ( defaultConnRate = 1000 - defaultMaxPayloadSize = 200 // MB + defaultMaxPayloadSize = 400 // MB errorBusy = "Busy" maxConcurrentFindBatchValsRequests = 25 - defaultExecTimeout = 10 * time.Second + defaultExecTimeout = 15 * time.Second ) // Global map for message type timeouts diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/cascade/adaptors/p2p.go index 1a9e14b0..31184fd7 100644 --- a/supernode/cascade/adaptors/p2p.go +++ b/supernode/cascade/adaptors/p2p.go @@ -18,7 +18,7 @@ import ( ) const ( - loadSymbolsBatchSize = 3000 + loadSymbolsBatchSize = 100 storeSymbolsPercent = 18 storeBatchContextTimeout = 3 * time.Minute P2PDataRaptorQSymbol = 1 From 5b93c20c07a771da9edb756efa8579e12359f875 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Wed, 8 Oct 2025 16:53:28 +0500 Subject: [PATCH 11/36] Add logs for meta files --- supernode/cascade/register.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go index 65565d21..2fe2623d 100644 --- a/supernode/cascade/register.go +++ b/supernode/cascade/register.go @@ -104,7 +104,17 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } + + // Calculate combined size of all index and layout files + totalSize := 0 + for _, file := range rqidResp.RedundantMetadataFiles { + totalSize += len(file) + } + fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) + fields["combined_files_size_bytes"] = totalSize + fields["combined_files_size_kb"] = float64(totalSize) / 1024 + fields["combined_files_size_mb"] = float64(totalSize) / (1024 * 1024) logtrace.Info(ctx, "register: rqid files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) From 2ddda94f7e2a7b90927c517e34df28f2894c5fad Mon Sep 17 00:00:00 2001 From: J Bilal rafique Date: Wed, 8 Oct 2025 12:08:28 +0000 Subject: [PATCH 12/36] Add pprof routing via swagger --- gen/supernode/service.pb.go | 424 ++++++++++- gen/supernode/service.pb.gw.go | 658 +++++++++++++++++- gen/supernode/service.swagger.json | 282 ++++++++ gen/supernode/service_grpc.pb.go | 310 ++++++++- 
proto/supernode/service.proto | 74 +- supernode/cmd/start.go | 8 +- supernode/status/service.go | 8 + supernode/transport/gateway/server.go | 71 +- .../transport/grpc/status/pprof_handlers.go | 223 ++++++ 9 files changed, 2011 insertions(+), 47 deletions(-) create mode 100644 supernode/transport/grpc/status/pprof_handlers.go diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index b8399095..c70a14dc 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -163,6 +163,255 @@ func (x *ServiceInfo) GetMethods() []string { return nil } +// Pprof message types +type GetPprofIndexRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetPprofIndexRequest) Reset() { + *x = GetPprofIndexRequest{} + mi := &file_supernode_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofIndexRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofIndexRequest) ProtoMessage() {} + +func (x *GetPprofIndexRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofIndexRequest.ProtoReflect.Descriptor instead. +func (*GetPprofIndexRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{3} +} + +type GetPprofIndexResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Html string `protobuf:"bytes,1,opt,name=html,proto3" json:"html,omitempty"` // HTML content for the pprof index page + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled +} + +func (x *GetPprofIndexResponse) Reset() { + *x = GetPprofIndexResponse{} + mi := &file_supernode_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofIndexResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofIndexResponse) ProtoMessage() {} + +func (x *GetPprofIndexResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofIndexResponse.ProtoReflect.Descriptor instead. 
+func (*GetPprofIndexResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{4} +} + +func (x *GetPprofIndexResponse) GetHtml() string { + if x != nil { + return x.Html + } + return "" +} + +func (x *GetPprofIndexResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +type GetPprofProfileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (optional, default 1) +} + +func (x *GetPprofProfileRequest) Reset() { + *x = GetPprofProfileRequest{} + mi := &file_supernode_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofProfileRequest) ProtoMessage() {} + +func (x *GetPprofProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofProfileRequest.ProtoReflect.Descriptor instead. +func (*GetPprofProfileRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetPprofProfileRequest) GetDebug() int32 { + if x != nil { + return x.Debug + } + return 0 +} + +type GetPprofCpuProfileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Duration in seconds (optional, default 30) +} + +func (x *GetPprofCpuProfileRequest) Reset() { + *x = GetPprofCpuProfileRequest{} + mi := &file_supernode_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofCpuProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofCpuProfileRequest) ProtoMessage() {} + +func (x *GetPprofCpuProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofCpuProfileRequest.ProtoReflect.Descriptor instead. 
+func (*GetPprofCpuProfileRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{6} +} + +func (x *GetPprofCpuProfileRequest) GetSeconds() int32 { + if x != nil { + return x.Seconds + } + return 0 +} + +type GetPprofProfileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Profile data (binary pprof format) + ContentType string `protobuf:"bytes,2,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Content type of the response + Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Error message if profiling is disabled +} + +func (x *GetPprofProfileResponse) Reset() { + *x = GetPprofProfileResponse{} + mi := &file_supernode_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofProfileResponse) ProtoMessage() {} + +func (x *GetPprofProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofProfileResponse.ProtoReflect.Descriptor instead. +func (*GetPprofProfileResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetPprofProfileResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *GetPprofProfileResponse) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *GetPprofProfileResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *GetPprofProfileResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + var File_supernode_service_proto protoreflect.FileDescriptor var file_supernode_service_proto_rawDesc = []byte{ @@ -183,24 +432,108 @@ var file_supernode_service_proto_rawDesc = []byte{ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, - 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, - 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 
0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x45, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, + 0x2e, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, + 0x35, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0xc5, 0x09, 0x0a, 0x10, 0x53, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x73, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, + 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, + 0x70, 0x72, 0x6f, 0x66, 0x12, 0x77, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x48, 0x65, 0x61, 0x70, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x81, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, + 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x65, 0x12, 0x7b, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x41, 0x6c, 0x6c, + 0x6f, 0x63, 0x73, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x61, 0x70, 
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, + 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x79, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, + 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x79, 0x0a, 0x0d, 0x47, 0x65, 0x74, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, + 0x75, 0x74, 0x65, 0x78, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x80, + 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 
0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -215,25 +548,46 @@ func file_supernode_service_proto_rawDescGZIP() []byte { return file_supernode_service_proto_rawDescData } -var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_supernode_service_proto_goTypes = []any{ - (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest - (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 2: supernode.ServiceInfo - (*StatusRequest)(nil), // 3: supernode.StatusRequest - (*StatusResponse)(nil), // 4: supernode.StatusResponse + (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 2: supernode.ServiceInfo + (*GetPprofIndexRequest)(nil), // 3: supernode.GetPprofIndexRequest + (*GetPprofIndexResponse)(nil), // 4: supernode.GetPprofIndexResponse + (*GetPprofProfileRequest)(nil), // 5: supernode.GetPprofProfileRequest + (*GetPprofCpuProfileRequest)(nil), // 6: supernode.GetPprofCpuProfileRequest + (*GetPprofProfileResponse)(nil), // 7: supernode.GetPprofProfileResponse + (*StatusRequest)(nil), // 8: supernode.StatusRequest + (*StatusResponse)(nil), // 9: supernode.StatusResponse } var file_supernode_service_proto_depIdxs = []int32{ - 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 3, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 3: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 1, // 4: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo + 8, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 3, // 3: supernode.SupernodeService.GetPprofIndex:input_type -> supernode.GetPprofIndexRequest + 5, // 4: supernode.SupernodeService.GetPprofHeap:input_type -> supernode.GetPprofProfileRequest + 5, // 5: supernode.SupernodeService.GetPprofGoroutine:input_type -> supernode.GetPprofProfileRequest + 5, // 6: supernode.SupernodeService.GetPprofAllocs:input_type -> supernode.GetPprofProfileRequest + 5, // 7: supernode.SupernodeService.GetPprofBlock:input_type -> supernode.GetPprofProfileRequest + 5, // 8: supernode.SupernodeService.GetPprofMutex:input_type -> supernode.GetPprofProfileRequest + 5, // 9: supernode.SupernodeService.GetPprofThreadcreate:input_type -> supernode.GetPprofProfileRequest + 6, // 10: supernode.SupernodeService.GetPprofProfile:input_type -> supernode.GetPprofCpuProfileRequest + 9, // 11: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 1, // 12: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 4, // 13: supernode.SupernodeService.GetPprofIndex:output_type -> 
supernode.GetPprofIndexResponse + 7, // 14: supernode.SupernodeService.GetPprofHeap:output_type -> supernode.GetPprofProfileResponse + 7, // 15: supernode.SupernodeService.GetPprofGoroutine:output_type -> supernode.GetPprofProfileResponse + 7, // 16: supernode.SupernodeService.GetPprofAllocs:output_type -> supernode.GetPprofProfileResponse + 7, // 17: supernode.SupernodeService.GetPprofBlock:output_type -> supernode.GetPprofProfileResponse + 7, // 18: supernode.SupernodeService.GetPprofMutex:output_type -> supernode.GetPprofProfileResponse + 7, // 19: supernode.SupernodeService.GetPprofThreadcreate:output_type -> supernode.GetPprofProfileResponse + 7, // 20: supernode.SupernodeService.GetPprofProfile:output_type -> supernode.GetPprofProfileResponse + 11, // [11:21] is the sub-list for method output_type + 1, // [1:11] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_supernode_service_proto_init() } @@ -248,7 +602,7 @@ func file_supernode_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_service_proto_rawDesc, NumEnums: 0, - NumMessages: 3, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 326bccf3..9cdfafbc 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -87,13 +87,375 @@ func local_request_SupernodeService_ListServices_0(ctx context.Context, marshale } +func request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofIndexRequest + var metadata runtime.ServerMetadata + + msg, err := client.GetPprofIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofIndexRequest + var metadata runtime.ServerMetadata + + msg, err := server.GetPprofIndex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofHeap(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofGoroutine(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } 
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofAllocs(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofBlock(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofMutex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + 
filter_SupernodeService_GetPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofThreadcreate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofCpuProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofCpuProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofProfile(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. 
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -104,7 +466,7 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -112,11 +474,11 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser return } - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -127,7 +489,7 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -135,7 +497,99 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser return } - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -220,6 +674,166 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser }) + mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -227,10 +841,42 @@ var ( pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "pprof"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "heap"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "goroutine"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "allocs"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "block"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "mutex"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "threadcreate"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "profile"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofIndex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofBlock_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofMutex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofThreadcreate_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofProfile_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 0b515a20..6e04d0b9 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -16,6 +16,253 @@ "application/json" ], "paths": { + "/api/v1/debug/pprof": { + "get": { + "summary": "Profiling endpoints", + "operationId": "SupernodeService_GetPprofIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofIndexResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": 
"#/definitions/rpcStatus" + } + } + }, + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/block": { + "get": { + "operationId": "SupernodeService_GetPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + 
"description": "Duration in seconds (optional, default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, "/api/v1/services": { "get": { "operationId": "SupernodeService_ListServices", @@ -419,6 +666,41 @@ } } }, + "supernodeGetPprofIndexResponse": { + "type": "object", + "properties": { + "html": { + "type": "string", + "title": "HTML content for the pprof index page" + }, + "enabled": { + "type": "boolean", + "title": "Whether profiling is enabled" + } + } + }, + "supernodeGetPprofProfileResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Profile data (binary pprof format)" + }, + "contentType": { + "type": "string", + "title": "Content type of the response" + }, + "enabled": { + "type": "boolean", + "title": "Whether profiling is enabled" + }, + "error": { + "type": "string", + "title": "Error message if profiling is disabled" + } + } + }, "supernodeListServicesResponse": { "type": "object", "properties": { diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go index acb2e4c9..98db9323 100644 --- a/gen/supernode/service_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -19,8 +19,16 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" - SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" + SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetPprofIndex_FullMethodName = "/supernode.SupernodeService/GetPprofIndex" + SupernodeService_GetPprofHeap_FullMethodName = "/supernode.SupernodeService/GetPprofHeap" + SupernodeService_GetPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetPprofGoroutine" + SupernodeService_GetPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetPprofAllocs" + SupernodeService_GetPprofBlock_FullMethodName = "/supernode.SupernodeService/GetPprofBlock" + SupernodeService_GetPprofMutex_FullMethodName = "/supernode.SupernodeService/GetPprofMutex" + SupernodeService_GetPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetPprofThreadcreate" + SupernodeService_GetPprofProfile_FullMethodName = "/supernode.SupernodeService/GetPprofProfile" ) // SupernodeServiceClient is the client API for SupernodeService service. 
@@ -31,6 +39,15 @@ const ( type SupernodeServiceClient interface { GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Profiling endpoints + GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) + GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) } type supernodeServiceClient struct { @@ -61,6 +78,86 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi return out, nil } +func (c *supernodeServiceClient) GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofIndexResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofIndex_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofHeap_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofGoroutine_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofAllocs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofBlock_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofMutex_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofThreadcreate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // SupernodeServiceServer is the server API for SupernodeService service. // All implementations must embed UnimplementedSupernodeServiceServer // for forward compatibility. @@ -69,6 +166,15 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi type SupernodeServiceServer interface { GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Profiling endpoints + GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) + GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofGoroutine(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) mustEmbedUnimplementedSupernodeServiceServer() } @@ -85,6 +191,30 @@ func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusReq func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") } +func (UnimplementedSupernodeServiceServer) GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofIndex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofHeap not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofGoroutine(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method GetPprofGoroutine not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofAllocs not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofBlock not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofMutex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofThreadcreate not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofProfile not implemented") +} func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} @@ -142,6 +272,150 @@ func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _SupernodeService_GetPprofIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofIndex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofIndex(ctx, req.(*GetPprofIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofHeap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofHeap_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofHeap(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofGoroutine_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofAllocs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofBlock_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofBlock(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofMutex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofMutex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofMutex(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofThreadcreate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofCpuProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(SupernodeServiceServer).GetPprofProfile(ctx, req.(*GetPprofCpuProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + // SupernodeService_ServiceDesc is the grpc.ServiceDesc for SupernodeService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -157,6 +431,38 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListServices", Handler: _SupernodeService_ListServices_Handler, }, + { + MethodName: "GetPprofIndex", + Handler: _SupernodeService_GetPprofIndex_Handler, + }, + { + MethodName: "GetPprofHeap", + Handler: _SupernodeService_GetPprofHeap_Handler, + }, + { + MethodName: "GetPprofGoroutine", + Handler: _SupernodeService_GetPprofGoroutine_Handler, + }, + { + MethodName: "GetPprofAllocs", + Handler: _SupernodeService_GetPprofAllocs_Handler, + }, + { + MethodName: "GetPprofBlock", + Handler: _SupernodeService_GetPprofBlock_Handler, + }, + { + MethodName: "GetPprofMutex", + Handler: _SupernodeService_GetPprofMutex_Handler, + }, + { + MethodName: "GetPprofThreadcreate", + Handler: _SupernodeService_GetPprofThreadcreate_Handler, + }, + { + MethodName: "GetPprofProfile", + Handler: _SupernodeService_GetPprofProfile_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "supernode/service.proto", diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto index 9725f84a..05d071d4 100644 --- a/proto/supernode/service.proto +++ b/proto/supernode/service.proto @@ -12,12 +12,61 @@ service SupernodeService { get: "/api/v1/status" }; } - + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { option (google.api.http) = { get: "/api/v1/services" }; } + + // Profiling endpoints + rpc GetPprofIndex(GetPprofIndexRequest) returns (GetPprofIndexResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof" + }; + } + + rpc GetPprofHeap(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/heap" + }; + } + + rpc GetPprofGoroutine(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/goroutine" + }; + } + + rpc GetPprofAllocs(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/allocs" + }; + } + + rpc GetPprofBlock(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/block" + }; + } + + rpc GetPprofMutex(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/mutex" + }; + } + + rpc GetPprofThreadcreate(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/threadcreate" + }; + } + + rpc GetPprofProfile(GetPprofCpuProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/profile" + }; + } } message ListServicesRequest {} @@ -32,3 +81,26 @@ message ServiceInfo { repeated string methods = 2; } +// Pprof message types +message GetPprofIndexRequest {} + +message GetPprofIndexResponse { + string html = 1; // HTML content for the pprof index page + bool enabled = 2; // Whether profiling is enabled +} + +message GetPprofProfileRequest { + int32 debug = 1; // Debug level (optional, default 1) +} + +message GetPprofCpuProfileRequest { + int32 seconds = 1; // Duration in seconds (optional, default 30) +} + +message 
GetPprofProfileResponse { + bytes data = 1; // Profile data (binary pprof format) + string content_type = 2; // Content type of the response + bool enabled = 3; // Whether profiling is enabled + string error = 4; // Error message if profiling is disabled +} + diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 5b173034..1b0b1de7 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -143,7 +143,13 @@ The supernode will connect to the Lumera network and begin participating in the } // Create HTTP gateway server that directly calls the supernode server - gatewayServer, err := gateway.NewServer(appConfig.SupernodeConfig.Host, int(appConfig.SupernodeConfig.GatewayPort), supernodeServer) + // Pass chain ID for pprof configuration + gatewayServer, err := gateway.NewServerWithConfig( + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.GatewayPort), + supernodeServer, + appConfig.LumeraClientConfig.ChainID, + ) if err != nil { return fmt.Errorf("failed to create gateway server: %w", err) } diff --git a/supernode/status/service.go b/supernode/status/service.go index 1745b0d3..553f7e2d 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -32,6 +32,14 @@ func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg} } +// GetChainID returns the chain ID from the configuration +func (s *SupernodeStatusService) GetChainID() string { + if s.config != nil { + return s.config.LumeraClientConfig.ChainID + } + return "" +} + // GetStatus returns the current system status including optional P2P info func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (*pb.StatusResponse, error) { fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} diff --git a/supernode/transport/gateway/server.go b/supernode/transport/gateway/server.go index 7e17e238..e306539a 100644 --- a/supernode/transport/gateway/server.go +++ b/supernode/transport/gateway/server.go @@ -5,7 +5,10 @@ import ( "fmt" "net" "net/http" + _ "net/http/pprof" + "os" "strconv" + "strings" "time" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -23,6 +26,8 @@ type Server struct { port int server *http.Server supernodeServer pb.SupernodeServiceServer + chainID string + pprofEnabled bool } // NewServer creates a new HTTP gateway server that directly calls the service @@ -44,6 +49,29 @@ func NewServer(ipAddress string, port int, supernodeServer pb.SupernodeServiceSe }, nil } +// NewServerWithConfig creates a new HTTP gateway server with additional configuration +func NewServerWithConfig(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer, chainID string) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + // Determine if pprof should be enabled + pprofEnabled := strings.Contains(strings.ToLower(chainID), "testnet") || os.Getenv("ENABLE_PPROF") == "true" + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + chainID: chainID, + pprofEnabled: pprofEnabled, + }, nil +} + // Run starts the HTTP gateway server (implements service interface) func (s *Server) Run(ctx context.Context) error { // Create 
gRPC-Gateway mux with custom JSON marshaler options @@ -69,6 +97,28 @@ func (s *Server) Run(ctx context.Context) error { // Register Swagger endpoints httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) + + // Register pprof endpoints (only on testnet) + if s.pprofEnabled { + httpMux.HandleFunc("/debug/pprof/", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/cmdline", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/profile", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/symbol", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/trace", s.pprofHandler) + // Register specific pprof profiles + httpMux.HandleFunc("/debug/pprof/allocs", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/block", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/goroutine", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/heap", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/mutex", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/threadcreate", s.pprofHandler) + + logtrace.Debug(ctx, "Pprof endpoints enabled on gateway", logtrace.Fields{ + "chain_id": s.chainID, + "port": s.port, + }) + } + httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/" { http.Redirect(w, r, "/swagger-ui/", http.StatusFound) @@ -87,8 +137,9 @@ func (s *Server) Run(ctx context.Context) error { } logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ - "address": s.ipAddress, - "port": s.port, + "address": s.ipAddress, + "port": s.port, + "pprof_enabled": s.pprofEnabled, }) // Start server @@ -124,3 +175,19 @@ func (s *Server) corsMiddleware(h http.Handler) http.Handler { h.ServeHTTP(w, r) }) } + +// pprofHandler proxies requests to the pprof handlers +func (s *Server) pprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } +} diff --git a/supernode/transport/grpc/status/pprof_handlers.go b/supernode/transport/grpc/status/pprof_handlers.go new file mode 100644 index 00000000..4557e5f6 --- /dev/null +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -0,0 +1,223 @@ +package server + +import ( + "bytes" + "context" + "fmt" + "os" + "runtime" + "runtime/pprof" + "strings" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" +) + +// isPprofEnabled checks if pprof should be enabled based on chain ID or environment variable +func (s *SupernodeServer) isPprofEnabled() bool { + // Check if chain ID contains testnet + if s.statusService != nil && s.statusService.GetChainID() != "" { + if strings.Contains(strings.ToLower(s.statusService.GetChainID()), "testnet") { + return true + } + } + + // Check environment variable + return os.Getenv("ENABLE_PPROF") == "true" +} + +// GetPprofIndex returns the pprof index page +func (s *SupernodeServer) GetPprofIndex(ctx context.Context, req *pb.GetPprofIndexRequest) (*pb.GetPprofIndexResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofIndexResponse{ + Html: "", + Enabled: false, + }, nil + } + + // Generate a simple index page with links to available profiles + html := ` + + +Supernode Profiling + + + +

+<h1>Supernode Profiling</h1>
+<p>Available profiles:</p>
+<ul>
+  <li><a href="/api/v1/debug/pprof/heap">heap</a> - A sampling of memory allocations of live objects</li>
+  <li><a href="/api/v1/debug/pprof/goroutine">goroutine</a> - Stack traces of all current goroutines</li>
+  <li><a href="/api/v1/debug/pprof/allocs">allocs</a> - A sampling of all past memory allocations</li>
+  <li><a href="/api/v1/debug/pprof/block">block</a> - Stack traces that led to blocking on synchronization primitives</li>
+  <li><a href="/api/v1/debug/pprof/mutex">mutex</a> - Stack traces of holders of contended mutexes</li>
+  <li><a href="/api/v1/debug/pprof/threadcreate">threadcreate</a> - Stack traces that led to the creation of new OS threads</li>
+  <li><a href="/api/v1/debug/pprof/profile">profile</a> - CPU profile (specify ?seconds=30 for duration)</li>
+</ul>
+</body>
+</html>
+ +` + + return &pb.GetPprofIndexResponse{ + Html: html, + Enabled: true, + }, nil +} + +// GetPprofHeap returns the heap profile +func (s *SupernodeServer) GetPprofHeap(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("heap", req.GetDebug()) +} + +// GetPprofGoroutine returns the goroutine profile +func (s *SupernodeServer) GetPprofGoroutine(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("goroutine", req.GetDebug()) +} + +// GetPprofAllocs returns the allocations profile +func (s *SupernodeServer) GetPprofAllocs(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("allocs", req.GetDebug()) +} + +// GetPprofBlock returns the block profile +func (s *SupernodeServer) GetPprofBlock(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("block", req.GetDebug()) +} + +// GetPprofMutex returns the mutex profile +func (s *SupernodeServer) GetPprofMutex(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("mutex", req.GetDebug()) +} + +// GetPprofThreadcreate returns the threadcreate profile +func (s *SupernodeServer) GetPprofThreadcreate(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("threadcreate", req.GetDebug()) +} + +// GetPprofProfile returns the CPU profile +func (s *SupernodeServer) GetPprofProfile(ctx context.Context, req *pb.GetPprofCpuProfileRequest) (*pb.GetPprofProfileResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofProfileResponse{ + Enabled: false, + Error: "Profiling is disabled. Enable on testnet or set ENABLE_PPROF=true", + }, nil + } + + seconds := req.GetSeconds() + if seconds <= 0 { + seconds = 30 // Default to 30 seconds + } + if seconds > 300 { + seconds = 300 // Cap at 5 minutes + } + + var buf bytes.Buffer + if err := pprof.StartCPUProfile(&buf); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to start CPU profile: %v", err), + }, nil + } + + // Profile for the specified duration + time.Sleep(time.Duration(seconds) * time.Second) + pprof.StopCPUProfile() + + return &pb.GetPprofProfileResponse{ + Data: buf.Bytes(), + ContentType: "application/octet-stream", + Enabled: true, + }, nil +} + +// getPprofProfile is a helper function to get various runtime profiles +func (s *SupernodeServer) getPprofProfile(profileType string, debug int32) (*pb.GetPprofProfileResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofProfileResponse{ + Enabled: false, + Error: "Profiling is disabled. 
Enable on testnet or set ENABLE_PPROF=true", + }, nil + } + + var buf bytes.Buffer + var contentType string + + // Get the appropriate profile + var p *pprof.Profile + switch profileType { + case "heap": + runtime.GC() // Force GC before heap profile + p = pprof.Lookup("heap") + contentType = "application/octet-stream" + case "goroutine": + p = pprof.Lookup("goroutine") + contentType = "application/octet-stream" + case "allocs": + p = pprof.Lookup("allocs") + contentType = "application/octet-stream" + case "block": + p = pprof.Lookup("block") + contentType = "application/octet-stream" + case "mutex": + p = pprof.Lookup("mutex") + contentType = "application/octet-stream" + case "threadcreate": + p = pprof.Lookup("threadcreate") + contentType = "application/octet-stream" + default: + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Unknown profile type: %s", profileType), + }, nil + } + + if p == nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Profile %s not found", profileType), + }, nil + } + + // Write the profile to buffer + // If debug > 0, write in text format for human reading + if debug > 0 { + if err := p.WriteTo(&buf, int(debug)); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to write profile: %v", err), + }, nil + } + contentType = "text/plain" + } else { + // Write in binary pprof format + if err := p.WriteTo(&buf, 0); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to write profile: %v", err), + }, nil + } + } + + return &pb.GetPprofProfileResponse{ + Data: buf.Bytes(), + ContentType: contentType, + Enabled: true, + }, nil +} \ No newline at end of file From ba79a79ec429bcdeece8735b3658f7ef76879574 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Thu, 9 Oct 2025 11:03:00 +0500 Subject: [PATCH 13/36] Route pprof via swagger endpoints --- gen/supernode/service.pb.go | 468 ++++----- gen/supernode/service.pb.gw.go | 925 ++++++++++-------- gen/supernode/service.swagger.json | 215 ++-- gen/supernode/service_grpc.pb.go | 362 ++++--- go.mod | 1 + go.sum | 2 + profile_cascade.sh | 2 +- proto/supernode/service.proto | 76 +- supernode/transport/gateway/server.go | 49 +- supernode/transport/gateway/swagger.json | 864 +++++++++++++++- supernode/transport/grpc/status/handler.go | 8 +- .../transport/grpc/status/pprof_handlers.go | 362 +++---- 12 files changed, 2225 insertions(+), 1109 deletions(-) diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index c70a14dc..0c0a5b3a 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -163,27 +163,29 @@ func (x *ServiceInfo) GetMethods() []string { return nil } -// Pprof message types -type GetPprofIndexRequest struct { +// Raw pprof request/response messages +type RawPprofRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) } -func (x *GetPprofIndexRequest) Reset() { - *x = GetPprofIndexRequest{} +func (x *RawPprofRequest) Reset() { + *x = RawPprofRequest{} mi := &file_supernode_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetPprofIndexRequest) String() string { +func (x *RawPprofRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPprofIndexRequest) ProtoMessage() {} +func 
(*RawPprofRequest) ProtoMessage() {} -func (x *GetPprofIndexRequest) ProtoReflect() protoreflect.Message { +func (x *RawPprofRequest) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -195,132 +197,41 @@ func (x *GetPprofIndexRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPprofIndexRequest.ProtoReflect.Descriptor instead. -func (*GetPprofIndexRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RawPprofRequest.ProtoReflect.Descriptor instead. +func (*RawPprofRequest) Descriptor() ([]byte, []int) { return file_supernode_service_proto_rawDescGZIP(), []int{3} } -type GetPprofIndexResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Html string `protobuf:"bytes,1,opt,name=html,proto3" json:"html,omitempty"` // HTML content for the pprof index page - Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled -} - -func (x *GetPprofIndexResponse) Reset() { - *x = GetPprofIndexResponse{} - mi := &file_supernode_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPprofIndexResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPprofIndexResponse) ProtoMessage() {} - -func (x *GetPprofIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_service_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPprofIndexResponse.ProtoReflect.Descriptor instead. -func (*GetPprofIndexResponse) Descriptor() ([]byte, []int) { - return file_supernode_service_proto_rawDescGZIP(), []int{4} -} - -func (x *GetPprofIndexResponse) GetHtml() string { - if x != nil { - return x.Html - } - return "" -} - -func (x *GetPprofIndexResponse) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -type GetPprofProfileRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (optional, default 1) -} - -func (x *GetPprofProfileRequest) Reset() { - *x = GetPprofProfileRequest{} - mi := &file_supernode_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPprofProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPprofProfileRequest) ProtoMessage() {} - -func (x *GetPprofProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_service_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPprofProfileRequest.ProtoReflect.Descriptor instead. 
-func (*GetPprofProfileRequest) Descriptor() ([]byte, []int) { - return file_supernode_service_proto_rawDescGZIP(), []int{5} -} - -func (x *GetPprofProfileRequest) GetDebug() int32 { +func (x *RawPprofRequest) GetDebug() int32 { if x != nil { return x.Debug } return 0 } -type GetPprofCpuProfileRequest struct { +type RawPprofCpuRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Duration in seconds (optional, default 30) + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) } -func (x *GetPprofCpuProfileRequest) Reset() { - *x = GetPprofCpuProfileRequest{} - mi := &file_supernode_service_proto_msgTypes[6] +func (x *RawPprofCpuRequest) Reset() { + *x = RawPprofCpuRequest{} + mi := &file_supernode_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetPprofCpuProfileRequest) String() string { +func (x *RawPprofCpuRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPprofCpuProfileRequest) ProtoMessage() {} +func (*RawPprofCpuRequest) ProtoMessage() {} -func (x *GetPprofCpuProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_service_proto_msgTypes[6] +func (x *RawPprofCpuRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -331,44 +242,41 @@ func (x *GetPprofCpuProfileRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPprofCpuProfileRequest.ProtoReflect.Descriptor instead. -func (*GetPprofCpuProfileRequest) Descriptor() ([]byte, []int) { - return file_supernode_service_proto_rawDescGZIP(), []int{6} +// Deprecated: Use RawPprofCpuRequest.ProtoReflect.Descriptor instead. 
+func (*RawPprofCpuRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{4} } -func (x *GetPprofCpuProfileRequest) GetSeconds() int32 { +func (x *RawPprofCpuRequest) GetSeconds() int32 { if x != nil { return x.Seconds } return 0 } -type GetPprofProfileResponse struct { +type RawPprofResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Profile data (binary pprof format) - ContentType string `protobuf:"bytes,2,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Content type of the response - Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Error message if profiling is disabled + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof } -func (x *GetPprofProfileResponse) Reset() { - *x = GetPprofProfileResponse{} - mi := &file_supernode_service_proto_msgTypes[7] +func (x *RawPprofResponse) Reset() { + *x = RawPprofResponse{} + mi := &file_supernode_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetPprofProfileResponse) String() string { +func (x *RawPprofResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPprofProfileResponse) ProtoMessage() {} +func (*RawPprofResponse) ProtoMessage() {} -func (x *GetPprofProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_service_proto_msgTypes[7] +func (x *RawPprofResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -379,39 +287,18 @@ func (x *GetPprofProfileResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPprofProfileResponse.ProtoReflect.Descriptor instead. -func (*GetPprofProfileResponse) Descriptor() ([]byte, []int) { - return file_supernode_service_proto_rawDescGZIP(), []int{7} +// Deprecated: Use RawPprofResponse.ProtoReflect.Descriptor instead. 
+func (*RawPprofResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{5} } -func (x *GetPprofProfileResponse) GetData() []byte { +func (x *RawPprofResponse) GetData() []byte { if x != nil { return x.Data } return nil } -func (x *GetPprofProfileResponse) GetContentType() string { - if x != nil { - return x.ContentType - } - return "" -} - -func (x *GetPprofProfileResponse) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *GetPprofProfileResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - var File_supernode_service_proto protoreflect.FileDescriptor var file_supernode_service_proto_rawDesc = []byte{ @@ -432,108 +319,113 @@ var file_supernode_service_proto_rawDesc = []byte{ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x45, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, - 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, - 0x2e, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, - 0x35, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0xc5, 0x09, 0x0a, 0x10, 0x53, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, - 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, - 0x70, 0x72, 0x6f, 0x66, 0x12, 0x77, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, - 0x48, 0x65, 0x61, 0x70, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x81, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, - 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x65, 0x12, 0x7b, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x41, 0x6c, 0x6c, - 0x6f, 0x63, 0x73, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 
0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, - 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x79, - 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, - 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, - 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x79, 0x0a, 0x0d, 0x47, 0x65, 0x74, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, - 0x75, 0x74, 0x65, 0x78, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, - 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x80, - 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x12, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 
0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x22, 0x27, 0x0a, 0x0f, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x2e, 0x0a, 0x12, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x26, 0x0a, 0x10, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x32, 0xec, 0x0b, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, + 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, + 0x70, 0x72, 0x6f, 0x66, 0x12, 0x70, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x48, 0x65, 0x61, 0x70, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, + 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x7a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x12, 0x1a, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, + 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, + 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x72, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, + 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x72, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 0x74, 0x65, 0x78, + 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, + 0x12, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 
0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x12, 0x24, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, + 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x76, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x6d, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x63, + 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x1a, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x72, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, + 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 
0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -548,43 +440,47 @@ func file_supernode_service_proto_rawDescGZIP() []byte { return file_supernode_service_proto_rawDescData } -var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_supernode_service_proto_goTypes = []any{ - (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest - (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 2: supernode.ServiceInfo - (*GetPprofIndexRequest)(nil), // 3: supernode.GetPprofIndexRequest - (*GetPprofIndexResponse)(nil), // 4: supernode.GetPprofIndexResponse - (*GetPprofProfileRequest)(nil), // 5: supernode.GetPprofProfileRequest - (*GetPprofCpuProfileRequest)(nil), // 6: supernode.GetPprofCpuProfileRequest - (*GetPprofProfileResponse)(nil), // 7: supernode.GetPprofProfileResponse - (*StatusRequest)(nil), // 8: supernode.StatusRequest - (*StatusResponse)(nil), // 9: supernode.StatusResponse + (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 2: supernode.ServiceInfo + (*RawPprofRequest)(nil), // 3: supernode.RawPprofRequest + (*RawPprofCpuRequest)(nil), // 4: supernode.RawPprofCpuRequest + (*RawPprofResponse)(nil), // 5: supernode.RawPprofResponse + (*StatusRequest)(nil), // 6: supernode.StatusRequest + (*StatusResponse)(nil), // 7: supernode.StatusResponse } var file_supernode_service_proto_depIdxs = []int32{ 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 8, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 6, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 3, // 3: supernode.SupernodeService.GetPprofIndex:input_type -> supernode.GetPprofIndexRequest - 5, // 4: supernode.SupernodeService.GetPprofHeap:input_type -> supernode.GetPprofProfileRequest - 5, // 5: supernode.SupernodeService.GetPprofGoroutine:input_type -> supernode.GetPprofProfileRequest - 5, // 6: supernode.SupernodeService.GetPprofAllocs:input_type -> supernode.GetPprofProfileRequest - 5, // 7: supernode.SupernodeService.GetPprofBlock:input_type -> supernode.GetPprofProfileRequest - 5, // 8: supernode.SupernodeService.GetPprofMutex:input_type -> supernode.GetPprofProfileRequest - 5, // 9: supernode.SupernodeService.GetPprofThreadcreate:input_type -> supernode.GetPprofProfileRequest - 6, // 10: supernode.SupernodeService.GetPprofProfile:input_type -> supernode.GetPprofCpuProfileRequest - 9, // 11: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 1, // 12: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 4, // 13: supernode.SupernodeService.GetPprofIndex:output_type -> supernode.GetPprofIndexResponse - 7, // 14: supernode.SupernodeService.GetPprofHeap:output_type -> supernode.GetPprofProfileResponse - 7, // 15: supernode.SupernodeService.GetPprofGoroutine:output_type -> supernode.GetPprofProfileResponse - 7, // 16: supernode.SupernodeService.GetPprofAllocs:output_type -> supernode.GetPprofProfileResponse - 7, // 17: supernode.SupernodeService.GetPprofBlock:output_type -> 
supernode.GetPprofProfileResponse - 7, // 18: supernode.SupernodeService.GetPprofMutex:output_type -> supernode.GetPprofProfileResponse - 7, // 19: supernode.SupernodeService.GetPprofThreadcreate:output_type -> supernode.GetPprofProfileResponse - 7, // 20: supernode.SupernodeService.GetPprofProfile:output_type -> supernode.GetPprofProfileResponse - 11, // [11:21] is the sub-list for method output_type - 1, // [1:11] is the sub-list for method input_type + 3, // 3: supernode.SupernodeService.GetRawPprof:input_type -> supernode.RawPprofRequest + 3, // 4: supernode.SupernodeService.GetRawPprofHeap:input_type -> supernode.RawPprofRequest + 3, // 5: supernode.SupernodeService.GetRawPprofGoroutine:input_type -> supernode.RawPprofRequest + 3, // 6: supernode.SupernodeService.GetRawPprofAllocs:input_type -> supernode.RawPprofRequest + 3, // 7: supernode.SupernodeService.GetRawPprofBlock:input_type -> supernode.RawPprofRequest + 3, // 8: supernode.SupernodeService.GetRawPprofMutex:input_type -> supernode.RawPprofRequest + 3, // 9: supernode.SupernodeService.GetRawPprofThreadcreate:input_type -> supernode.RawPprofRequest + 4, // 10: supernode.SupernodeService.GetRawPprofProfile:input_type -> supernode.RawPprofCpuRequest + 3, // 11: supernode.SupernodeService.GetRawPprofCmdline:input_type -> supernode.RawPprofRequest + 3, // 12: supernode.SupernodeService.GetRawPprofSymbol:input_type -> supernode.RawPprofRequest + 3, // 13: supernode.SupernodeService.GetRawPprofTrace:input_type -> supernode.RawPprofRequest + 7, // 14: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 1, // 15: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 5, // 16: supernode.SupernodeService.GetRawPprof:output_type -> supernode.RawPprofResponse + 5, // 17: supernode.SupernodeService.GetRawPprofHeap:output_type -> supernode.RawPprofResponse + 5, // 18: supernode.SupernodeService.GetRawPprofGoroutine:output_type -> supernode.RawPprofResponse + 5, // 19: supernode.SupernodeService.GetRawPprofAllocs:output_type -> supernode.RawPprofResponse + 5, // 20: supernode.SupernodeService.GetRawPprofBlock:output_type -> supernode.RawPprofResponse + 5, // 21: supernode.SupernodeService.GetRawPprofMutex:output_type -> supernode.RawPprofResponse + 5, // 22: supernode.SupernodeService.GetRawPprofThreadcreate:output_type -> supernode.RawPprofResponse + 5, // 23: supernode.SupernodeService.GetRawPprofProfile:output_type -> supernode.RawPprofResponse + 5, // 24: supernode.SupernodeService.GetRawPprofCmdline:output_type -> supernode.RawPprofResponse + 5, // 25: supernode.SupernodeService.GetRawPprofSymbol:output_type -> supernode.RawPprofResponse + 5, // 26: supernode.SupernodeService.GetRawPprofTrace:output_type -> supernode.RawPprofResponse + 14, // [14:27] is the sub-list for method output_type + 1, // [1:14] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name @@ -602,7 +498,7 @@ func file_supernode_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_service_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 9cdfafbc..89e6ca78 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -10,587 +10,737 @@ package supernode import ( "context" + 
"errors" "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - var ( - filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join ) -func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq StatusRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - + var ( + protoReq StatusRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetStatus(ctx, &protoReq) return msg, metadata, err - } func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - + var ( + protoReq ListServicesRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func 
local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - + var ( + protoReq ListServicesRequest + metadata runtime.ServerMetadata + ) msg, err := server.ListServices(ctx, &protoReq) return msg, metadata, err - } -func request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofIndexRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} - msg, err := client.GetPprofIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) +func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -func local_request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofIndexRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetPprofIndex(ctx, &protoReq) +func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprof(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler 
runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := client.GetPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -func local_request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata - +func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofHeap(ctx, &protoReq) + msg, err := server.GetRawPprofHeap(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) } - - msg, err := client.GetPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -func local_request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata - +func local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofGoroutine(ctx, &protoReq) + msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := client.GetPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -func local_request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata - +func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler 
runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofAllocs(ctx, &protoReq) + msg, err := server.GetRawPprofAllocs(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := client.GetPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -func local_request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata - +func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofBlock(ctx, &protoReq) + msg, err := 
server.GetRawPprofBlock(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := client.GetPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err +} +func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofMutex(ctx, &protoReq) + return msg, metadata, err } -func local_request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofMutex(ctx, &protoReq) + msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) + return msg, metadata, err +} -func request_SupernodeService_GetPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofCpuRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := client.GetPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err +} +func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofCpuRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofProfile(ctx, &protoReq) + return msg, metadata, err } -func local_request_SupernodeService_GetPprofThreadcreate_0(ctx 
context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofThreadcreate(ctx, &protoReq) + msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } -var ( - filter_SupernodeService_GetPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +func local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofCmdline(ctx, &protoReq) + return msg, metadata, err +} -func request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofCpuProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofProfile_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := 
client.GetPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.GetRawPprofSymbol(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err +} +func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofSymbol(ctx, &protoReq) + return msg, metadata, err } -func local_request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPprofCpuProfileRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofProfile_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - msg, err := server.GetPprofProfile(ctx, &protoReq) + msg, err := client.GetRawPprofTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err +} +func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetRawPprofTrace(ctx, &protoReq) + return msg, metadata, err } // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. 
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + resp, md, err := local_request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + resp, md, err := local_request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + resp, md, err := local_request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) return nil @@ -599,25 +749,24 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser // RegisterSupernodeServiceHandlerFromEndpoint is same as RegisterSupernodeServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() - return RegisterSupernodeServiceHandler(ctx, mux, conn) } @@ -631,252 +780,260 @@ func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "SupernodeServiceClient" to call the correct interceptors. +// "SupernodeServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_ListServices_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + resp, md, err := request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- + resp, md, err := request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + resp, md, err := request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - return nil } var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "pprof"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "heap"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "goroutine"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "allocs"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "block"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "mutex"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "threadcreate"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_GetPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "profile"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", 
"pprof", "mutex"}, "")) + pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) + pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) ) var ( - forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofIndex_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofHeap_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofGoroutine_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofAllocs_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofBlock_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofMutex_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofThreadcreate_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetPprofProfile_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage + forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 6e04d0b9..c3944e9d 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -16,15 +16,15 @@ "application/json" ], "paths": { - "/api/v1/debug/pprof": { + "/api/v1/debug/raw/pprof": { "get": { - "summary": "Profiling endpoints", - "operationId": "SupernodeService_GetPprofIndex", + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofIndexResponse" + "$ref": 
"#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -34,19 +34,29 @@ } } }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], "tags": [ "SupernodeService" ] } }, - "/api/v1/debug/pprof/allocs": { + "/api/v1/debug/raw/pprof/allocs": { "get": { - "operationId": "SupernodeService_GetPprofAllocs", + "operationId": "SupernodeService_GetRawPprofAllocs", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -59,7 +69,7 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -71,14 +81,14 @@ ] } }, - "/api/v1/debug/pprof/block": { + "/api/v1/debug/raw/pprof/block": { "get": { - "operationId": "SupernodeService_GetPprofBlock", + "operationId": "SupernodeService_GetRawPprofBlock", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -91,7 +101,7 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -103,14 +113,14 @@ ] } }, - "/api/v1/debug/pprof/goroutine": { + "/api/v1/debug/raw/pprof/cmdline": { "get": { - "operationId": "SupernodeService_GetPprofGoroutine", + "operationId": "SupernodeService_GetRawPprofCmdline", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -123,7 +133,7 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -135,14 +145,14 @@ ] } }, - "/api/v1/debug/pprof/heap": { + "/api/v1/debug/raw/pprof/goroutine": { "get": { - "operationId": "SupernodeService_GetPprofHeap", + "operationId": "SupernodeService_GetRawPprofGoroutine", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -155,7 +165,7 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -167,14 +177,14 @@ ] } }, - "/api/v1/debug/pprof/mutex": { + "/api/v1/debug/raw/pprof/heap": { "get": { - "operationId": "SupernodeService_GetPprofMutex", + "operationId": "SupernodeService_GetRawPprofHeap", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -187,7 +197,7 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -199,14 +209,46 @@ 
] } }, - "/api/v1/debug/pprof/profile": { + "/api/v1/debug/raw/pprof/mutex": { "get": { - "operationId": "SupernodeService_GetPprofProfile", + "operationId": "SupernodeService_GetRawPprofMutex", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -219,7 +261,7 @@ "parameters": [ { "name": "seconds", - "description": "Duration in seconds (optional, default 30)", + "description": "CPU profile duration in seconds (default 30)", "in": "query", "required": false, "type": "integer", @@ -231,14 +273,14 @@ ] } }, - "/api/v1/debug/pprof/threadcreate": { + "/api/v1/debug/raw/pprof/symbol": { "get": { - "operationId": "SupernodeService_GetPprofThreadcreate", + "operationId": "SupernodeService_GetRawPprofSymbol", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/supernodeGetPprofProfileResponse" + "$ref": "#/definitions/supernodeRawPprofResponse" } }, "default": { @@ -251,7 +293,71 @@ "parameters": [ { "name": "debug", - "description": "Debug level (optional, default 1)", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", "required": false, "type": "integer", @@ -666,41 +772,6 @@ } } }, - "supernodeGetPprofIndexResponse": { - "type": "object", - "properties": { - "html": { - "type": "string", - "title": "HTML content for the pprof index page" - }, - "enabled": { - "type": "boolean", - "title": "Whether profiling is enabled" - } - } - }, - "supernodeGetPprofProfileResponse": { - "type": "object", - "properties": { - "data": { - "type": 
"string", - "format": "byte", - "title": "Profile data (binary pprof format)" - }, - "contentType": { - "type": "string", - "title": "Content type of the response" - }, - "enabled": { - "type": "boolean", - "title": "Whether profiling is enabled" - }, - "error": { - "type": "string", - "title": "Error message if profiling is disabled" - } - } - }, "supernodeListServicesResponse": { "type": "object", "properties": { @@ -717,6 +788,16 @@ } } }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, "supernodeServiceInfo": { "type": "object", "properties": { diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go index 98db9323..42857bf2 100644 --- a/gen/supernode/service_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -19,16 +19,19 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" - SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" - SupernodeService_GetPprofIndex_FullMethodName = "/supernode.SupernodeService/GetPprofIndex" - SupernodeService_GetPprofHeap_FullMethodName = "/supernode.SupernodeService/GetPprofHeap" - SupernodeService_GetPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetPprofGoroutine" - SupernodeService_GetPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetPprofAllocs" - SupernodeService_GetPprofBlock_FullMethodName = "/supernode.SupernodeService/GetPprofBlock" - SupernodeService_GetPprofMutex_FullMethodName = "/supernode.SupernodeService/GetPprofMutex" - SupernodeService_GetPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetPprofThreadcreate" - SupernodeService_GetPprofProfile_FullMethodName = "/supernode.SupernodeService/GetPprofProfile" + SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" + SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetRawPprof_FullMethodName = "/supernode.SupernodeService/GetRawPprof" + SupernodeService_GetRawPprofHeap_FullMethodName = "/supernode.SupernodeService/GetRawPprofHeap" + SupernodeService_GetRawPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetRawPprofGoroutine" + SupernodeService_GetRawPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetRawPprofAllocs" + SupernodeService_GetRawPprofBlock_FullMethodName = "/supernode.SupernodeService/GetRawPprofBlock" + SupernodeService_GetRawPprofMutex_FullMethodName = "/supernode.SupernodeService/GetRawPprofMutex" + SupernodeService_GetRawPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetRawPprofThreadcreate" + SupernodeService_GetRawPprofProfile_FullMethodName = "/supernode.SupernodeService/GetRawPprofProfile" + SupernodeService_GetRawPprofCmdline_FullMethodName = "/supernode.SupernodeService/GetRawPprofCmdline" + SupernodeService_GetRawPprofSymbol_FullMethodName = "/supernode.SupernodeService/GetRawPprofSymbol" + SupernodeService_GetRawPprofTrace_FullMethodName = "/supernode.SupernodeService/GetRawPprofTrace" ) // SupernodeServiceClient is the client API for SupernodeService service. 
@@ -39,15 +42,18 @@ const ( type SupernodeServiceClient interface { GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) - // Profiling endpoints - GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) - GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) - GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) } type supernodeServiceClient struct { @@ -78,80 +84,110 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi return out, nil } -func (c *supernodeServiceClient) GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) { +func (c *supernodeServiceClient) GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofIndexResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofIndex_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprof_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofHeap_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofHeap_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofGoroutine_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofGoroutine_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofAllocs_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofAllocs_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofBlock_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofBlock_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofMutex_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofMutex_FullMethodName, in, out, cOpts...) 
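On the gRPC side the renamed methods are ordinary unary RPCs on the regenerated client. A minimal sketch of pulling a heap profile this way, assuming the supernode serves these RPCs on a reachable plaintext endpoint and a grpc-go version recent enough to provide grpc.NewClient; NewSupernodeServiceClient is the standard generated constructor for the client wrappers shown here, and the address and output filename are placeholders:

package main

import (
	"context"
	"log"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
)

func main() {
	conn, err := grpc.NewClient("localhost:4444", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewSupernodeServiceClient(conn)

	// Leaving debug at its zero value requests the binary protobuf profile,
	// matching the "0 for binary, >0 for text" convention documented above.
	resp, err := client.GetRawPprofHeap(context.Background(), &pb.RawPprofRequest{})
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("heap.pb.gz", resp.Data, 0o644); err != nil {
		log.Fatal(err)
	}
}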
if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofThreadcreate_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofThreadcreate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *supernodeServiceClient) GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { +func (c *supernodeServiceClient) GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPprofProfileResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetPprofProfile_FullMethodName, in, out, cOpts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofCmdline_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofSymbol_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofTrace_FullMethodName, in, out, cOpts...) 
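The contract behind RawPprofResponse is that the data field carries pprof output exactly as runtime/pprof produces it (through the JSON gateway the bytes are simply base64-encoded, which is why the raw HTTP handler exists at all). As an illustration of what an implementation of the regenerated SupernodeServiceServer interface might look like under that contract (a sketch only, not the supernode's actual handler; the package name is made up):

package supernodeserver

import (
	"bytes"
	"context"
	"fmt"
	"runtime/pprof"

	pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
)

// server embeds the generated UnimplementedSupernodeServiceServer so that
// methods not spelled out here return codes.Unimplemented.
type server struct {
	pb.UnimplementedSupernodeServiceServer
}

func (s *server) GetRawPprofHeap(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) {
	p := pprof.Lookup("heap")
	if p == nil {
		return nil, fmt.Errorf("heap profile not available")
	}

	var buf bytes.Buffer
	// debug == 0 writes the gzip-compressed protobuf profile; debug > 0 writes a
	// human-readable text dump, mirroring the "0 for binary, >0 for text" parameter.
	if err := p.WriteTo(&buf, int(req.GetDebug())); err != nil {
		return nil, err
	}
	return &pb.RawPprofResponse{Data: buf.Bytes()}, nil
}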
if err != nil { return nil, err } @@ -166,15 +202,18 @@ func (c *supernodeServiceClient) GetPprofProfile(ctx context.Context, in *GetPpr type SupernodeServiceServer interface { GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) - // Profiling endpoints - GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) - GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofGoroutine(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) - GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) + GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofTrace(context.Context, *RawPprofRequest) (*RawPprofResponse, error) mustEmbedUnimplementedSupernodeServiceServer() } @@ -191,29 +230,38 @@ func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusReq func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofIndex not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprof not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofHeap not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofGoroutine not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofHeap not implemented") +func 
(UnimplementedSupernodeServiceServer) GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofAllocs not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofGoroutine(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofGoroutine not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofBlock not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofAllocs not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofMutex not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofBlock not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofThreadcreate not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofMutex not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofProfile not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofThreadcreate not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofCmdline not implemented") } -func (UnimplementedSupernodeServiceServer) GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPprofProfile not implemented") +func (UnimplementedSupernodeServiceServer) GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofSymbol not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofTrace(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofTrace not implemented") } func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} @@ -272,146 +320,200 @@ func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofIndex_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofIndexRequest) +func _SupernodeService_GetRawPprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofIndex(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprof(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofIndex_FullMethodName, + FullMethod: SupernodeService_GetRawPprof_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofIndex(ctx, req.(*GetPprofIndexRequest)) + return srv.(SupernodeServiceServer).GetRawPprof(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofHeap(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofHeap_FullMethodName, + FullMethod: SupernodeService_GetRawPprofHeap_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofHeap(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofGoroutine_FullMethodName, + FullMethod: SupernodeService_GetRawPprofGoroutine_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofAllocs_FullMethodName, + FullMethod: SupernodeService_GetRawPprofAllocs_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofBlock(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofBlock_FullMethodName, + FullMethod: SupernodeService_GetRawPprofBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofBlock(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofMutex(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofMutex_FullMethodName, + FullMethod: SupernodeService_GetRawPprofMutex_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofMutex(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofProfileRequest) +func _SupernodeService_GetRawPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, in) + return 
srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofThreadcreate_FullMethodName, + FullMethod: SupernodeService_GetRawPprofThreadcreate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, req.(*GetPprofProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } -func _SupernodeService_GetPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPprofCpuProfileRequest) +func _SupernodeService_GetRawPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofCpuRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(SupernodeServiceServer).GetPprofProfile(ctx, in) + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SupernodeService_GetPprofProfile_FullMethodName, + FullMethod: SupernodeService_GetRawPprofProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetPprofProfile(ctx, req.(*GetPprofCpuProfileRequest)) + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, req.(*RawPprofCpuRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofCmdline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofCmdline_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofSymbol_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofSymbol_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofTrace_FullMethodName, + } 
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, req.(*RawPprofRequest)) } return interceptor(ctx, in, info, handler) } @@ -432,36 +534,48 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{ Handler: _SupernodeService_ListServices_Handler, }, { - MethodName: "GetPprofIndex", - Handler: _SupernodeService_GetPprofIndex_Handler, + MethodName: "GetRawPprof", + Handler: _SupernodeService_GetRawPprof_Handler, + }, + { + MethodName: "GetRawPprofHeap", + Handler: _SupernodeService_GetRawPprofHeap_Handler, + }, + { + MethodName: "GetRawPprofGoroutine", + Handler: _SupernodeService_GetRawPprofGoroutine_Handler, + }, + { + MethodName: "GetRawPprofAllocs", + Handler: _SupernodeService_GetRawPprofAllocs_Handler, }, { - MethodName: "GetPprofHeap", - Handler: _SupernodeService_GetPprofHeap_Handler, + MethodName: "GetRawPprofBlock", + Handler: _SupernodeService_GetRawPprofBlock_Handler, }, { - MethodName: "GetPprofGoroutine", - Handler: _SupernodeService_GetPprofGoroutine_Handler, + MethodName: "GetRawPprofMutex", + Handler: _SupernodeService_GetRawPprofMutex_Handler, }, { - MethodName: "GetPprofAllocs", - Handler: _SupernodeService_GetPprofAllocs_Handler, + MethodName: "GetRawPprofThreadcreate", + Handler: _SupernodeService_GetRawPprofThreadcreate_Handler, }, { - MethodName: "GetPprofBlock", - Handler: _SupernodeService_GetPprofBlock_Handler, + MethodName: "GetRawPprofProfile", + Handler: _SupernodeService_GetRawPprofProfile_Handler, }, { - MethodName: "GetPprofMutex", - Handler: _SupernodeService_GetPprofMutex_Handler, + MethodName: "GetRawPprofCmdline", + Handler: _SupernodeService_GetRawPprofCmdline_Handler, }, { - MethodName: "GetPprofThreadcreate", - Handler: _SupernodeService_GetPprofThreadcreate_Handler, + MethodName: "GetRawPprofSymbol", + Handler: _SupernodeService_GetRawPprofSymbol_Handler, }, { - MethodName: "GetPprofProfile", - Handler: _SupernodeService_GetPprofProfile_Handler, + MethodName: "GetRawPprofTrace", + Handler: _SupernodeService_GetRawPprofTrace_Handler, }, }, Streams: []grpc.StreamDesc{}, diff --git a/go.mod b/go.mod index a581736e..1bbdf7bb 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.18.0 diff --git a/go.sum b/go.sum index 839f29a2..ed0db1f6 100644 --- a/go.sum +++ b/go.sum @@ -415,6 +415,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= diff --git 
a/profile_cascade.sh b/profile_cascade.sh index 7fe0af5e..9b6fe71a 100755 --- a/profile_cascade.sh +++ b/profile_cascade.sh @@ -4,7 +4,7 @@ # Samples heap every 30 seconds during cascade downloads # Configuration - modify these as needed -PROFILE_URL="http://localhost:6062/debug/pprof/heap" +PROFILE_URL="http://localhost:8002/api/v1/debug/raw/pprof/heap" INTERVAL=30 TIMESTAMP=$(date +%Y%m%d_%H%M%S) PROFILE_DIR="profiles_${TIMESTAMP}" diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto index 05d071d4..d51de355 100644 --- a/proto/supernode/service.proto +++ b/proto/supernode/service.proto @@ -19,52 +19,70 @@ service SupernodeService { }; } - // Profiling endpoints - rpc GetPprofIndex(GetPprofIndexRequest) returns (GetPprofIndexResponse) { + // Raw pprof endpoints - return standard pprof output directly + rpc GetRawPprof(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof" + get: "/api/v1/debug/raw/pprof" }; } - rpc GetPprofHeap(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofHeap(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/heap" + get: "/api/v1/debug/raw/pprof/heap" }; } - rpc GetPprofGoroutine(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofGoroutine(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/goroutine" + get: "/api/v1/debug/raw/pprof/goroutine" }; } - rpc GetPprofAllocs(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofAllocs(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/allocs" + get: "/api/v1/debug/raw/pprof/allocs" }; } - rpc GetPprofBlock(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofBlock(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/block" + get: "/api/v1/debug/raw/pprof/block" }; } - rpc GetPprofMutex(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofMutex(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/mutex" + get: "/api/v1/debug/raw/pprof/mutex" }; } - rpc GetPprofThreadcreate(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofThreadcreate(RawPprofRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/threadcreate" + get: "/api/v1/debug/raw/pprof/threadcreate" }; } - rpc GetPprofProfile(GetPprofCpuProfileRequest) returns (GetPprofProfileResponse) { + rpc GetRawPprofProfile(RawPprofCpuRequest) returns (RawPprofResponse) { option (google.api.http) = { - get: "/api/v1/debug/pprof/profile" + get: "/api/v1/debug/raw/pprof/profile" + }; + } + + rpc GetRawPprofCmdline(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/cmdline" + }; + } + + rpc GetRawPprofSymbol(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/symbol" + }; + } + + rpc GetRawPprofTrace(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/trace" }; } } @@ -81,26 +99,16 @@ message ServiceInfo { repeated string methods = 2; } -// Pprof message types -message GetPprofIndexRequest {} - -message GetPprofIndexResponse { - string html = 1; // HTML content for the pprof index page - bool enabled = 2; 
// Whether profiling is enabled -} - -message GetPprofProfileRequest { - int32 debug = 1; // Debug level (optional, default 1) +// Raw pprof request/response messages +message RawPprofRequest { + int32 debug = 1; // Debug level (0 for binary, >0 for text) } -message GetPprofCpuProfileRequest { - int32 seconds = 1; // Duration in seconds (optional, default 30) +message RawPprofCpuRequest { + int32 seconds = 1; // CPU profile duration in seconds (default 30) } -message GetPprofProfileResponse { - bytes data = 1; // Profile data (binary pprof format) - string content_type = 2; // Content type of the response - bool enabled = 3; // Whether profiling is enabled - string error = 4; // Error message if profiling is disabled +message RawPprofResponse { + bytes data = 1; // Raw pprof data exactly as returned by runtime/pprof } diff --git a/supernode/transport/gateway/server.go b/supernode/transport/gateway/server.go index e306539a..5c4df034 100644 --- a/supernode/transport/gateway/server.go +++ b/supernode/transport/gateway/server.go @@ -11,7 +11,8 @@ import ( "strings" "time" - "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "google.golang.org/protobuf/encoding/protojson" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" @@ -77,8 +78,10 @@ func (s *Server) Run(ctx context.Context) error { // Create gRPC-Gateway mux with custom JSON marshaler options mux := runtime.NewServeMux( runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ - EmitDefaults: true, // This ensures zero values are included - OrigName: true, // Use original proto field names + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, // This ensures zero values are included + UseProtoNames: true, // Use original proto field names + }, }), ) @@ -91,6 +94,23 @@ func (s *Server) Run(ctx context.Context) error { // Create HTTP mux for custom endpoints httpMux := http.NewServeMux() + // Register raw pprof endpoints BEFORE the gRPC gateway to intercept them + // These must be registered before the /api/ handler to take precedence + if s.pprofEnabled { + // Raw pprof endpoints that return actual pprof data (not JSON) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/heap", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/goroutine", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/allocs", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/block", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/mutex", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/threadcreate", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/profile", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/cmdline", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/symbol", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/trace", s.rawPprofHandler) + } + // Register gRPC-Gateway endpoints httpMux.Handle("/api/", mux) @@ -191,3 +211,26 @@ func (s *Server) pprofHandler(w http.ResponseWriter, r *http.Request) { http.NotFound(w, r) } } + +// rawPprofHandler handles the raw pprof endpoints that return actual pprof data +func (s *Server) rawPprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Map 
the /api/v1/debug/raw/pprof/* path to /debug/pprof/* + originalPath := r.URL.Path + r.URL.Path = strings.Replace(originalPath, "/api/v1/debug/raw/pprof", "/debug/pprof", 1) + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } + + // Restore the original path + r.URL.Path = originalPath +} diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json index 0a40a447..c3944e9d 100644 --- a/supernode/transport/gateway/swagger.json +++ b/supernode/transport/gateway/swagger.json @@ -1,79 +1,857 @@ { - "openapi": "3.0.0", + "swagger": "2.0", "info": { - "title": "Supernode API", - "version": "1.0.0", - "description": "Supernode status and metrics API" + "title": "supernode/service.proto", + "version": "version not set" }, + "tags": [ + { + "name": "SupernodeService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "paths": { - "/api/v1/status": { + "/api/v1/debug/raw/pprof": { "get": { - "summary": "Get supernode status", - "description": "Returns system, network, and optional P2P metrics", + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, "parameters": [ { - "name": "include_p2p_metrics", + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", "in": "query", - "schema": { "type": "boolean" }, "required": false, - "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true" + "type": "integer", + "format": "int32" } ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetRawPprofAllocs", "responses": { "200": { - "description": "Status response", - "content": { - "application/json": { - "schema": { "$ref": "#/components/schemas/StatusResponse" } - } + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" } } - } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/block": { + "get": { + "operationId": "SupernodeService_GetRawPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/cmdline": { + "get": { + "operationId": "SupernodeService_GetRawPprofCmdline", + "responses": { + 
"200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetRawPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetRawPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetRawPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "CPU profile duration in seconds (default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/symbol": { + "get": { + "operationId": "SupernodeService_GetRawPprofSymbol", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ 
+ "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] } }, "/api/v1/services": { "get": { - "summary": "List services", - "description": "Returns available gRPC services and their methods/streams", + "operationId": "SupernodeService_ListServices", "responses": { "200": { - "description": "Services response", - "content": { - "application/json": { - "schema": { "$ref": "#/components/schemas/ListServicesResponse" } - } + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeListServicesResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" } } - } + }, + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/status": { + "get": { + "operationId": "SupernodeService_GetStatus", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeStatusResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "includeP2pMetrics", + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "SupernodeService" + ] } } }, - "components": { - "schemas": { - "StatusResponse": { - "type": "object", - "properties": { - "version": { "type": "string" }, - "uptimeSeconds": { "type": "integer" } + "definitions": { + "DhtMetricsBatchRetrievePoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "keys": { + "type": "integer", + "format": "int32", + "title": "keys requested" + }, + "required": { + "type": "integer", + "format": "int32", + "title": "required count" + }, + "foundLocal": { + "type": "integer", + "format": "int32", + "title": "found locally" + }, + "foundNetwork": { + "type": "integer", + "format": "int32", + "title": "found on network" + }, + "durationMs": { + "type": "string", + "format": "int64", + "title": "duration in milliseconds" + } + } + }, + "DhtMetricsStoreSuccessPoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + 
"format": "int64", + "title": "event time (unix seconds)" + }, + "requests": { + "type": "integer", + "format": "int32", + "title": "total node RPCs attempted" + }, + "successful": { + "type": "integer", + "format": "int32", + "title": "successful node RPCs" + }, + "successRate": { + "type": "number", + "format": "double", + "title": "percentage (0-100)" + } + } + }, + "P2PMetricsBanEntry": { + "type": "object", + "properties": { + "id": { + "type": "string", + "title": "printable ID" + }, + "ip": { + "type": "string", + "title": "last seen IP" + }, + "port": { + "type": "integer", + "format": "int64", + "title": "last seen port" + }, + "count": { + "type": "integer", + "format": "int32", + "title": "failure count" + }, + "createdAtUnix": { + "type": "string", + "format": "int64", + "title": "first ban time (unix seconds)" + }, + "ageSeconds": { + "type": "string", + "format": "int64", + "title": "age in seconds" + } + }, + "title": "Ban list entry" + }, + "P2PMetricsDatabaseStats": { + "type": "object", + "properties": { + "p2pDbSizeMb": { + "type": "number", + "format": "double" + }, + "p2pDbRecordsCount": { + "type": "string", + "format": "int64" + } + }, + "title": "DB stats" + }, + "P2PMetricsDhtMetrics": { + "type": "object", + "properties": { + "storeSuccessRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsStoreSuccessPoint" + } + }, + "batchRetrieveRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsBatchRetrievePoint" + } + }, + "hotPathBannedSkips": { + "type": "string", + "format": "int64", + "title": "counter" + }, + "hotPathBanIncrements": { + "type": "string", + "format": "int64", + "title": "counter" + } + }, + "title": "Rolling DHT metrics snapshot" + }, + "P2PMetricsDiskStatus": { + "type": "object", + "properties": { + "allMb": { + "type": "number", + "format": "double" + }, + "usedMb": { + "type": "number", + "format": "double" + }, + "freeMb": { + "type": "number", + "format": "double" } }, - "ListServicesResponse": { - "type": "object", - "properties": { - "services": { - "type": "array", - "items": { "$ref": "#/components/schemas/ServiceInfo" } + "title": "Disk status" + }, + "P2PMetricsHandleCounters": { + "type": "object", + "properties": { + "total": { + "type": "string", + "format": "int64" + }, + "success": { + "type": "string", + "format": "int64" + }, + "failure": { + "type": "string", + "format": "int64" + }, + "timeout": { + "type": "string", + "format": "int64" + } + }, + "title": "Per-handler counters from network layer" + }, + "ResourcesCPU": { + "type": "object", + "properties": { + "usagePercent": { + "type": "number", + "format": "double", + "title": "CPU usage percentage (0-100)" + }, + "cores": { + "type": "integer", + "format": "int32", + "title": "Number of CPU cores" + } + } + }, + "ResourcesMemory": { + "type": "object", + "properties": { + "totalGb": { + "type": "number", + "format": "double", + "title": "Total memory in GB" + }, + "usedGb": { + "type": "number", + "format": "double", + "title": "Used memory in GB" + }, + "availableGb": { + "type": "number", + "format": "double", + "title": "Available memory in GB" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Memory usage percentage (0-100)" + } + } + }, + "ResourcesStorage": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Storage path being monitored" + }, + "totalBytes": { + "type": "string", + "format": "uint64" + }, + "usedBytes": 
{ + "type": "string", + "format": "uint64" + }, + "availableBytes": { + "type": "string", + "format": "uint64" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Storage usage percentage (0-100)" + } + } + }, + "StatusResponseNetwork": { + "type": "object", + "properties": { + "peersCount": { + "type": "integer", + "format": "int32", + "title": "Number of connected peers in P2P network" + }, + "peerAddresses": { + "type": "array", + "items": { + "type": "string" }, - "count": { "type": "integer" } + "title": "List of connected peer addresses (optional, may be empty for privacy)" + } + }, + "title": "Network information" + }, + "StatusResponseP2PMetrics": { + "type": "object", + "properties": { + "dhtMetrics": { + "$ref": "#/definitions/P2PMetricsDhtMetrics" + }, + "networkHandleMetrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsHandleCounters" + } + }, + "connPoolMetrics": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "banList": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsBanEntry" + } + }, + "database": { + "$ref": "#/definitions/P2PMetricsDatabaseStats" + }, + "disk": { + "$ref": "#/definitions/P2PMetricsDiskStatus" } }, - "ServiceInfo": { - "type": "object", - "properties": { - "name": { "type": "string" }, - "methods": { "type": "array", "items": { "type": "string" } } + "title": "P2P metrics and diagnostics (additive field)" + }, + "StatusResponseResources": { + "type": "object", + "properties": { + "cpu": { + "$ref": "#/definitions/ResourcesCPU" + }, + "memory": { + "$ref": "#/definitions/ResourcesMemory" + }, + "storageVolumes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ResourcesStorage" + } + }, + "hardwareSummary": { + "type": "string", + "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" + } + }, + "title": "System resource information" + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } } } + }, + "supernodeListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/supernodeServiceInfo" + } + }, + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, + "supernodeServiceInfo": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "methods": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "supernodeStatusResponse": { + "type": "object", + "properties": { + "version": { + "type": "string", + "title": "Supernode version" + }, + "uptimeSeconds": { + "type": "string", + "format": "uint64", + "title": "Uptime in seconds" + }, + "resources": { + "$ref": "#/definitions/StatusResponseResources" + }, + "registeredServices": { + "type": "array", + "items": { + "type": "string" + }, + "title": "All registered/available services" + }, + "network": { + "$ref": 
"#/definitions/StatusResponseNetwork", + "title": "P2P network information" + }, + "rank": { + "type": "integer", + "format": "int32", + "title": "Rank in the top supernodes list (0 if not in top list)" + }, + "ipAddress": { + "type": "string", + "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" + }, + "p2pMetrics": { + "$ref": "#/definitions/StatusResponseP2PMetrics" + } + }, + "title": "The StatusResponse represents system status with clear organization" } } } diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go index 4e120279..e543e7b1 100644 --- a/supernode/transport/grpc/status/handler.go +++ b/supernode/transport/grpc/status/handler.go @@ -12,11 +12,17 @@ import ( type SupernodeServer struct { pb.UnimplementedSupernodeServiceServer statusService *statussvc.SupernodeStatusService + gatewayPort int } // NewSupernodeServer creates a new SupernodeServer func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *SupernodeServer { - return &SupernodeServer{statusService: statusService} + return &SupernodeServer{statusService: statusService, gatewayPort: 8002} +} + +// SetGatewayPort sets the gateway port for internal proxy requests +func (s *SupernodeServer) SetGatewayPort(port int) { + s.gatewayPort = port } // GetStatus implements SupernodeService.GetStatus diff --git a/supernode/transport/grpc/status/pprof_handlers.go b/supernode/transport/grpc/status/pprof_handlers.go index 4557e5f6..3c8defed 100644 --- a/supernode/transport/grpc/status/pprof_handlers.go +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -1,14 +1,12 @@ package server import ( - "bytes" "context" "fmt" + "io" + "net/http" "os" - "runtime" - "runtime/pprof" "strings" - "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" ) @@ -26,198 +24,230 @@ func (s *SupernodeServer) isPprofEnabled() bool { return os.Getenv("ENABLE_PPROF") == "true" } -// GetPprofIndex returns the pprof index page -func (s *SupernodeServer) GetPprofIndex(ctx context.Context, req *pb.GetPprofIndexRequest) (*pb.GetPprofIndexResponse, error) { - if !s.isPprofEnabled() { - return &pb.GetPprofIndexResponse{ - Html: "", - Enabled: false, - }, nil - } - - // Generate a simple index page with links to available profiles - html := ` - - -Supernode Profiling - - - -

-Supernode Profiling
-
-Available profiles:
-
-  • heap - A sampling of memory allocations of live objects
-  • goroutine - Stack traces of all current goroutines
-  • allocs - A sampling of all past memory allocations
-  • block - Stack traces that led to blocking on synchronization primitives
-  • mutex - Stack traces of holders of contended mutexes
-  • threadcreate - Stack traces that led to the creation of new OS threads
-  • profile - CPU profile (specify ?seconds=30 for duration)
- -` - - return &pb.GetPprofIndexResponse{ - Html: html, - Enabled: true, - }, nil + +// GetRawPprof returns the pprof index +func (s *SupernodeServer) GetRawPprof(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte("Profiling disabled")}, nil + } + + data, err := s.pprofProxy("/", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte(fmt.Sprintf("Error: %v", err))}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofHeap returns the heap profile -func (s *SupernodeServer) GetPprofHeap(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("heap", req.GetDebug()) +// GetRawPprofHeap returns raw heap profile +func (s *SupernodeServer) GetRawPprofHeap(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/heap", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofGoroutine returns the goroutine profile -func (s *SupernodeServer) GetPprofGoroutine(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("goroutine", req.GetDebug()) +// GetRawPprofGoroutine returns raw goroutine profile +func (s *SupernodeServer) GetRawPprofGoroutine(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/goroutine", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofAllocs returns the allocations profile -func (s *SupernodeServer) GetPprofAllocs(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("allocs", req.GetDebug()) +// GetRawPprofAllocs returns raw allocations profile +func (s *SupernodeServer) GetRawPprofAllocs(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/allocs", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofBlock returns the block profile -func (s *SupernodeServer) GetPprofBlock(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("block", req.GetDebug()) +// GetRawPprofBlock returns raw block profile +func (s *SupernodeServer) GetRawPprofBlock(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/block", queryParams) + if err != nil { + return 
&pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofMutex returns the mutex profile -func (s *SupernodeServer) GetPprofMutex(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("mutex", req.GetDebug()) +// GetRawPprofMutex returns raw mutex profile +func (s *SupernodeServer) GetRawPprofMutex(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/mutex", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofThreadcreate returns the threadcreate profile -func (s *SupernodeServer) GetPprofThreadcreate(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { - return s.getPprofProfile("threadcreate", req.GetDebug()) +// GetRawPprofThreadcreate returns raw threadcreate profile +func (s *SupernodeServer) GetRawPprofThreadcreate(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/threadcreate", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil } -// GetPprofProfile returns the CPU profile -func (s *SupernodeServer) GetPprofProfile(ctx context.Context, req *pb.GetPprofCpuProfileRequest) (*pb.GetPprofProfileResponse, error) { +// GetRawPprofProfile returns raw CPU profile +func (s *SupernodeServer) GetRawPprofProfile(ctx context.Context, req *pb.RawPprofCpuRequest) (*pb.RawPprofResponse, error) { if !s.isPprofEnabled() { - return &pb.GetPprofProfileResponse{ - Enabled: false, - Error: "Profiling is disabled. 
Enable on testnet or set ENABLE_PPROF=true", - }, nil + return &pb.RawPprofResponse{Data: []byte{}}, nil } seconds := req.GetSeconds() if seconds <= 0 { - seconds = 30 // Default to 30 seconds + seconds = 30 } if seconds > 300 { - seconds = 300 // Cap at 5 minutes + seconds = 300 + } + + queryParams := fmt.Sprintf("seconds=%d", seconds) + data, err := s.pprofProxy("/profile", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil } - var buf bytes.Buffer - if err := pprof.StartCPUProfile(&buf); err != nil { - return &pb.GetPprofProfileResponse{ - Enabled: true, - Error: fmt.Sprintf("Failed to start CPU profile: %v", err), - }, nil + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofCmdline returns the command line +func (s *SupernodeServer) GetRawPprofCmdline(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil } - // Profile for the specified duration - time.Sleep(time.Duration(seconds) * time.Second) - pprof.StopCPUProfile() + data, err := s.pprofProxy("/cmdline", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } - return &pb.GetPprofProfileResponse{ - Data: buf.Bytes(), - ContentType: "application/octet-stream", - Enabled: true, - }, nil + return &pb.RawPprofResponse{Data: data}, nil } -// getPprofProfile is a helper function to get various runtime profiles -func (s *SupernodeServer) getPprofProfile(profileType string, debug int32) (*pb.GetPprofProfileResponse, error) { +// GetRawPprofSymbol returns symbol information +func (s *SupernodeServer) GetRawPprofSymbol(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { if !s.isPprofEnabled() { - return &pb.GetPprofProfileResponse{ - Enabled: false, - Error: "Profiling is disabled. 
Enable on testnet or set ENABLE_PPROF=true", - }, nil - } - - var buf bytes.Buffer - var contentType string - - // Get the appropriate profile - var p *pprof.Profile - switch profileType { - case "heap": - runtime.GC() // Force GC before heap profile - p = pprof.Lookup("heap") - contentType = "application/octet-stream" - case "goroutine": - p = pprof.Lookup("goroutine") - contentType = "application/octet-stream" - case "allocs": - p = pprof.Lookup("allocs") - contentType = "application/octet-stream" - case "block": - p = pprof.Lookup("block") - contentType = "application/octet-stream" - case "mutex": - p = pprof.Lookup("mutex") - contentType = "application/octet-stream" - case "threadcreate": - p = pprof.Lookup("threadcreate") - contentType = "application/octet-stream" - default: - return &pb.GetPprofProfileResponse{ - Enabled: true, - Error: fmt.Sprintf("Unknown profile type: %s", profileType), - }, nil - } - - if p == nil { - return &pb.GetPprofProfileResponse{ - Enabled: true, - Error: fmt.Sprintf("Profile %s not found", profileType), - }, nil - } - - // Write the profile to buffer - // If debug > 0, write in text format for human reading - if debug > 0 { - if err := p.WriteTo(&buf, int(debug)); err != nil { - return &pb.GetPprofProfileResponse{ - Enabled: true, - Error: fmt.Sprintf("Failed to write profile: %v", err), - }, nil - } - contentType = "text/plain" - } else { - // Write in binary pprof format - if err := p.WriteTo(&buf, 0); err != nil { - return &pb.GetPprofProfileResponse{ - Enabled: true, - Error: fmt.Sprintf("Failed to write profile: %v", err), - }, nil - } + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + data, err := s.pprofProxy("/symbol", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofTrace returns execution trace +func (s *SupernodeServer) GetRawPprofTrace(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + // Trace typically takes a seconds parameter + queryParams := "seconds=1" + data, err := s.pprofProxy("/trace", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil } - return &pb.GetPprofProfileResponse{ - Data: buf.Bytes(), - ContentType: contentType, - Enabled: true, - }, nil + return &pb.RawPprofResponse{Data: data}, nil } \ No newline at end of file From d8c9a88892502d01fa24c89c2e2ac38648b971fb Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 9 Oct 2025 15:39:13 +0500 Subject: [PATCH 14/36] fix : go mod; regenrate proto --- gen/supernode/action/cascade/service.pb.go | 253 ++++---- gen/supernode/service.pb.go | 209 ++----- gen/supernode/service.pb.gw.go | 644 +++++++++++++-------- gen/supernode/status.pb.go | 480 ++++++--------- go.mod | 2 +- pkg/codec/codec_default_test.go | 8 +- 6 files changed, 750 insertions(+), 846 deletions(-) diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go index dd083d04..f270a051 100644 --- a/gen/supernode/action/cascade/service.pb.go +++ b/gen/supernode/action/cascade/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/action/cascade/service.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -116,15 +117,14 @@ func (SupernodeEventType) EnumDescriptor() ([]byte, []int) { } type RegisterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to RequestType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to RequestType: // // *RegisterRequest_Chunk // *RegisterRequest_Metadata - RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RegisterRequest) Reset() { @@ -157,23 +157,27 @@ func (*RegisterRequest) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} } -func (m *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { - if m != nil { - return m.RequestType +func (x *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { + if x != nil { + return x.RequestType } return nil } func (x *RegisterRequest) GetChunk() *DataChunk { - if x, ok := x.GetRequestType().(*RegisterRequest_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Chunk); ok { + return x.Chunk + } } return nil } func (x *RegisterRequest) GetMetadata() *Metadata { - if x, ok := x.GetRequestType().(*RegisterRequest_Metadata); ok { - return x.Metadata + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Metadata); ok { + return x.Metadata + } } return nil } @@ -195,11 +199,10 @@ func (*RegisterRequest_Chunk) isRegisterRequest_RequestType() {} func (*RegisterRequest_Metadata) isRegisterRequest_RequestType() {} type DataChunk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DataChunk) Reset() { @@ -240,12 +243,11 @@ func (x *DataChunk) GetData() []byte { } type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { @@ -293,13 +295,12 @@ func (x *Metadata) GetActionId() string { } type RegisterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string 
`protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` unknownFields protoimpl.UnknownFields - - EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RegisterResponse) Reset() { @@ -354,12 +355,11 @@ func (x *RegisterResponse) GetTxHash() string { } type DownloadRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` unknownFields protoimpl.UnknownFields - - ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` - Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadRequest) Reset() { @@ -407,15 +407,14 @@ func (x *DownloadRequest) GetSignature() string { } type DownloadResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ResponseType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ResponseType: // // *DownloadResponse_Event // *DownloadResponse_Chunk - ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DownloadResponse) Reset() { @@ -448,23 +447,27 @@ func (*DownloadResponse) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{5} } -func (m *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { - if m != nil { - return m.ResponseType +func (x *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { + if x != nil { + return x.ResponseType } return nil } func (x *DownloadResponse) GetEvent() *DownloadEvent { - if x, ok := x.GetResponseType().(*DownloadResponse_Event); ok { - return x.Event + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Event); ok { + return x.Event + } } return nil } func (x *DownloadResponse) GetChunk() *DataChunk { - if x, ok := x.GetResponseType().(*DownloadResponse_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Chunk); ok { + return x.Chunk + } } return nil } @@ -486,12 +489,11 @@ func (*DownloadResponse_Event) isDownloadResponse_ResponseType() {} func (*DownloadResponse_Chunk) isDownloadResponse_ResponseType() {} type DownloadEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` unknownFields protoimpl.UnknownFields - - EventType 
SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadEvent) Reset() { @@ -540,104 +542,66 @@ func (x *DownloadEvent) GetMessage() string { var File_supernode_action_cascade_service_proto protoreflect.FileDescriptor -var file_supernode_action_cascade_service_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, - 0x65, 0x22, 0x7e, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x12, 0x2f, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x0e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x1f, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x40, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, - 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x22, 0x4c, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x7f, 0x0a, 0x10, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 
0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, - 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x0f, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x65, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0xb3, - 0x03, 0x0a, 0x12, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x54, - 0x52, 0x49, 0x45, 0x56, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x46, 0x45, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x4f, 0x50, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x4e, 0x4f, - 0x44, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x44, 0x45, - 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, - 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, - 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, - 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x51, - 0x49, 0x44, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x11, - 0x0a, 0x0d, 0x52, 0x51, 0x49, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x09, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, - 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x52, 0x54, - 0x45, 0x46, 0x41, 0x43, 0x54, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x44, 0x10, 0x0b, 0x12, - 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, - 0x5a, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x52, 0x54, 0x45, 0x46, 0x41, 0x43, - 0x54, 0x53, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x0d, 0x12, - 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, 0x4d, 0x55, - 0x4c, 0x41, 0x54, 0x49, 
0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0e, 0x12, - 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, - 0x45, 0x56, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0f, 0x12, 0x14, 0x0a, - 0x10, 0x44, 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x45, 0x52, 0x56, 0x45, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x59, 0x10, 0x11, 0x32, 0x98, 0x01, 0x0a, 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x08, - 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, - 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, - 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_supernode_action_cascade_service_proto_rawDesc = "" + + "\n" + + "&supernode/action/cascade/service.proto\x12\acascade\"~\n" + + "\x0fRegisterRequest\x12*\n" + + "\x05chunk\x18\x01 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunk\x12/\n" + + "\bmetadata\x18\x02 \x01(\v2\x11.cascade.MetadataH\x00R\bmetadataB\x0e\n" + + "\frequest_type\"\x1f\n" + + "\tDataChunk\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"@\n" + + "\bMetadata\x12\x17\n" + + "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + + "\taction_id\x18\x02 \x01(\tR\bactionId\"\x81\x01\n" + + "\x10RegisterResponse\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12\x17\n" + + "\atx_hash\x18\x03 \x01(\tR\x06txHash\"L\n" + + "\x0fDownloadRequest\x12\x1b\n" + + "\taction_id\x18\x01 \x01(\tR\bactionId\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\tR\tsignature\"\x7f\n" + + "\x10DownloadResponse\x12.\n" + + "\x05event\x18\x01 \x01(\v2\x16.cascade.DownloadEventH\x00R\x05event\x12*\n" + + "\x05chunk\x18\x02 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunkB\x0f\n" + + "\rresponse_type\"e\n" + + "\rDownloadEvent\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage*\xb3\x03\n" + + "\x12SupernodeEventType\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\x14\n" + + "\x10ACTION_RETRIEVED\x10\x01\x12\x17\n" + + "\x13ACTION_FEE_VERIFIED\x10\x02\x12\x1e\n" + + "\x1aTOP_SUPERNODE_CHECK_PASSED\x10\x03\x12\x14\n" + + "\x10METADATA_DECODED\x10\x04\x12\x16\n" + + 
"\x12DATA_HASH_VERIFIED\x10\x05\x12\x11\n" + + "\rINPUT_ENCODED\x10\x06\x12\x16\n" + + "\x12SIGNATURE_VERIFIED\x10\a\x12\x12\n" + + "\x0eRQID_GENERATED\x10\b\x12\x11\n" + + "\rRQID_VERIFIED\x10\t\x12\x16\n" + + "\x12FINALIZE_SIMULATED\x10\n" + + "\x12\x14\n" + + "\x10ARTEFACTS_STORED\x10\v\x12\x14\n" + + "\x10ACTION_FINALIZED\x10\f\x12\x18\n" + + "\x14ARTEFACTS_DOWNLOADED\x10\r\x12\x1e\n" + + "\x1aFINALIZE_SIMULATION_FAILED\x10\x0e\x12\x1c\n" + + "\x18NETWORK_RETRIEVE_STARTED\x10\x0f\x12\x14\n" + + "\x10DECODE_COMPLETED\x10\x10\x12\x0f\n" + + "\vSERVE_READY\x10\x112\x98\x01\n" + + "\x0eCascadeService\x12C\n" + + "\bRegister\x12\x18.cascade.RegisterRequest\x1a\x19.cascade.RegisterResponse(\x010\x01\x12A\n" + + "\bDownload\x12\x18.cascade.DownloadRequest\x1a\x19.cascade.DownloadResponse0\x01BEZCgithub.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascadeb\x06proto3" var ( file_supernode_action_cascade_service_proto_rawDescOnce sync.Once - file_supernode_action_cascade_service_proto_rawDescData = file_supernode_action_cascade_service_proto_rawDesc + file_supernode_action_cascade_service_proto_rawDescData []byte ) func file_supernode_action_cascade_service_proto_rawDescGZIP() []byte { file_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { - file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_action_cascade_service_proto_rawDescData) + file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc))) }) return file_supernode_action_cascade_service_proto_rawDescData } @@ -689,7 +653,7 @@ func file_supernode_action_cascade_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_action_cascade_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc)), NumEnums: 1, NumMessages: 7, NumExtensions: 0, @@ -701,7 +665,6 @@ func file_supernode_action_cascade_service_proto_init() { MessageInfos: file_supernode_action_cascade_service_proto_msgTypes, }.Build() File_supernode_action_cascade_service_proto = out.File - file_supernode_action_cascade_service_proto_rawDesc = nil file_supernode_action_cascade_service_proto_goTypes = nil file_supernode_action_cascade_service_proto_depIdxs = nil } diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index 0c0a5b3a..ad1ff814 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/service.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,9 +23,9 @@ const ( ) type ListServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServicesRequest) Reset() { @@ -58,12 +59,11 @@ func (*ListServicesRequest) Descriptor() ([]byte, []int) { } type ListServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` unknownFields protoimpl.UnknownFields - - Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListServicesResponse) Reset() { @@ -111,12 +111,11 @@ func (x *ListServicesResponse) GetCount() int32 { } type ServiceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceInfo) Reset() { @@ -165,11 +164,10 @@ func (x *ServiceInfo) GetMethods() []string { // Raw pprof request/response messages type RawPprofRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) unknownFields protoimpl.UnknownFields - - Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) + sizeCache protoimpl.SizeCache } func (x *RawPprofRequest) Reset() { @@ -210,11 +208,10 @@ func (x *RawPprofRequest) GetDebug() int32 { } type RawPprofCpuRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) unknownFields protoimpl.UnknownFields - - Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) + sizeCache protoimpl.SizeCache } func (x *RawPprofCpuRequest) Reset() { @@ -255,11 +252,10 @@ func (x *RawPprofCpuRequest) GetSeconds() int32 { } type RawPprofResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof unknownFields protoimpl.UnknownFields - - Data []byte 
`protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof + sizeCache protoimpl.SizeCache } func (x *RawPprofResponse) Reset() { @@ -301,141 +297,45 @@ func (x *RawPprofResponse) GetData() []byte { var File_supernode_service_proto protoreflect.FileDescriptor -var file_supernode_service_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x22, 0x27, 0x0a, 0x0f, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x2e, 0x0a, 0x12, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x26, 0x0a, 0x10, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x32, 0xec, 0x0b, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, - 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 
0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, - 0x70, 0x72, 0x6f, 0x66, 0x12, 0x70, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x48, 0x65, 0x61, 0x70, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, - 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x7a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x12, 0x1a, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, - 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, - 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, - 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x72, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 
0x6b, 0x12, 0x1a, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, - 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x72, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 0x74, 0x65, 0x78, - 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, - 0x12, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, - 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x12, 0x24, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, - 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, - 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, - 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x76, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x6d, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 
0x63, - 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x1a, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, - 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x72, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x72, 0x61, 0x63, 0x65, - 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, - 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_supernode_service_proto_rawDesc = "" + + "\n" + + "\x17supernode/service.proto\x12\tsupernode\x1a\x16supernode/status.proto\x1a\x1cgoogle/api/annotations.proto\"\x15\n" + + "\x13ListServicesRequest\"`\n" + + "\x14ListServicesResponse\x122\n" + + "\bservices\x18\x01 \x03(\v2\x16.supernode.ServiceInfoR\bservices\x12\x14\n" + + "\x05count\x18\x02 \x01(\x05R\x05count\";\n" + + "\vServiceInfo\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\amethods\x18\x02 \x03(\tR\amethods\"'\n" + + "\x0fRawPprofRequest\x12\x14\n" + + "\x05debug\x18\x01 \x01(\x05R\x05debug\".\n" + + "\x12RawPprofCpuRequest\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x05R\aseconds\"&\n" + + "\x10RawPprofResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\xec\v\n" + + "\x10SupernodeService\x12X\n" + + "\tGetStatus\x12\x18.supernode.StatusRequest\x1a\x19.supernode.StatusResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/status\x12i\n" + + "\fListServices\x12\x1e.supernode.ListServicesRequest\x1a\x1f.supernode.ListServicesResponse\"\x18\x82\xd3\xe4\x93\x02\x12\x12\x10/api/v1/services\x12g\n" + + "\vGetRawPprof\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/api/v1/debug/raw/pprof\x12p\n" + + "\x0fGetRawPprofHeap\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/api/v1/debug/raw/pprof/heap\x12z\n" + + "\x14GetRawPprofGoroutine\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\")\x82\xd3\xe4\x93\x02#\x12!/api/v1/debug/raw/pprof/goroutine\x12t\n" + + 
"\x11GetRawPprofAllocs\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/allocs\x12r\n" + + "\x10GetRawPprofBlock\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/block\x12r\n" + + "\x10GetRawPprofMutex\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/mutex\x12\x80\x01\n" + + "\x17GetRawPprofThreadcreate\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\",\x82\xd3\xe4\x93\x02&\x12$/api/v1/debug/raw/pprof/threadcreate\x12y\n" + + "\x12GetRawPprofProfile\x12\x1d.supernode.RawPprofCpuRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/profile\x12v\n" + + "\x12GetRawPprofCmdline\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/cmdline\x12t\n" + + "\x11GetRawPprofSymbol\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/symbol\x12r\n" + + "\x10GetRawPprofTrace\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/traceB6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" var ( file_supernode_service_proto_rawDescOnce sync.Once - file_supernode_service_proto_rawDescData = file_supernode_service_proto_rawDesc + file_supernode_service_proto_rawDescData []byte ) func file_supernode_service_proto_rawDescGZIP() []byte { file_supernode_service_proto_rawDescOnce.Do(func() { - file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_service_proto_rawDescData) + file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc))) }) return file_supernode_service_proto_rawDescData } @@ -496,7 +396,7 @@ func file_supernode_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc)), NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -507,7 +407,6 @@ func file_supernode_service_proto_init() { MessageInfos: file_supernode_service_proto_msgTypes, }.Build() File_supernode_service_proto = out.File - file_supernode_service_proto_rawDesc = nil file_supernode_service_proto_goTypes = nil file_supernode_service_proto_depIdxs = nil } diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 89e6ca78..93983b0f 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -10,7 +10,6 @@ package supernode import ( "context" - "errors" "io" "net/http" @@ -25,470 +24,478 @@ import ( ) // Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + var ( - _ codes.Code - _ io.Reader - _ status.Status - _ = errors.New - _ = runtime.String - _ = utilities.NewDoubleArray - _ = metadata.Join + filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) -var filter_SupernodeService_GetStatus_0 = 
&utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} - func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq StatusRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq StatusRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq StatusRequest - metadata runtime.ServerMetadata - ) + var protoReq StatusRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetStatus(ctx, &protoReq) return msg, metadata, err + } func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq ListServicesRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq ListServicesRequest - metadata runtime.ServerMetadata - ) + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + msg, err := server.ListServices(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprof(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofHeap(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofAllocs(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} 
+var ( + filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofBlock(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofMutex(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofCpuRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofCpuRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofProfile(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofCmdline(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil 
{ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofSymbol(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofSymbol(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofTrace(ctx, &protoReq) return msg, metadata, err + } // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. -// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -500,15 +507,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -520,15 +532,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -540,15 +557,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -560,15 +582,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -580,15 +607,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -600,15 +632,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -620,15 +657,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -640,15 +682,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -660,15 +707,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -680,15 +732,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -700,15 +757,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -720,15 +782,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -740,7 +807,9 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) return nil @@ -767,6 +836,7 @@ func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runti } }() }() + return RegisterSupernodeServiceHandler(ctx, mux, conn) } @@ -780,13 +850,16 @@ func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "SupernodeServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. +// "SupernodeServiceClient" to call the correct interceptors. func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { - mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -797,13 +870,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -814,13 +892,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -831,13 +914,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -848,13 +936,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -865,13 +958,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -882,13 +980,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -899,13 +1002,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -916,13 +1024,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -933,13 +1046,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -950,13 +1068,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -967,13 +1090,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -984,13 +1112,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1001,39 +1134,66 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + return nil } var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) - pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) - pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) - pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) - pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) - pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) - pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + + pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) + + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) - pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) - pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, 
"")) - pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) - pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) + + pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + + pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) ) var ( - forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage - forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index f3182114..a659d729 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/status.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,13 +23,12 @@ const ( // StatusRequest controls optional metrics in the status response type StatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Optional: include detailed P2P metrics in the response // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusRequest) Reset() { @@ -70,10 +70,7 @@ func (x *StatusRequest) GetIncludeP2PMetrics() bool { // The StatusResponse represents system status with clear organization type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` @@ -82,6 +79,8 @@ type StatusResponse struct { Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse) Reset() { @@ -172,14 +171,13 @@ func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { // System resource information type StatusResponse_Resources struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources) Reset() { @@ -242,12 +240,11 @@ func (x *StatusResponse_Resources) GetHardwareSummary() string { // Network information type StatusResponse_Network struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" 
json:"peers_count,omitempty"` // Number of connected peers in P2P network + PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) unknownFields protoimpl.UnknownFields - - PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network - PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Network) Reset() { @@ -296,16 +293,15 @@ func (x *StatusResponse_Network) GetPeerAddresses() []string { // P2P metrics and diagnostics (additive field) type StatusResponse_P2PMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics) Reset() { @@ -381,12 +377,11 @@ func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskSta } type StatusResponse_Resources_CPU struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) + Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores unknownFields protoimpl.UnknownFields - - UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) - Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores + 
sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources_CPU) Reset() { @@ -434,14 +429,13 @@ func (x *StatusResponse_Resources_CPU) GetCores() int32 { } type StatusResponse_Resources_Memory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB + UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB + AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB + UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) unknownFields protoimpl.UnknownFields - - TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB - UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB - AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB - UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources_Memory) Reset() { @@ -503,15 +497,14 @@ func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { } type StatusResponse_Resources_Storage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored - TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` - UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` - AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` - UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored + TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources_Storage) Reset() { @@ -581,14 +574,13 @@ func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { // Rolling DHT metrics snapshot type StatusResponse_P2PMetrics_DhtMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState 
`protogen:"open.v1"` StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { @@ -651,14 +643,13 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { // Per-handler counters from network layer type StatusResponse_P2PMetrics_HandleCounters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` unknownFields protoimpl.UnknownFields - - Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { @@ -721,16 +712,15 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { // Ban list entry type StatusResponse_P2PMetrics_BanEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count - CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) - AgeSeconds int64 
`protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { @@ -807,12 +797,11 @@ func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { // DB stats type StatusResponse_P2PMetrics_DatabaseStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` - P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { @@ -861,13 +850,12 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { // Disk status type StatusResponse_P2PMetrics_DiskStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` unknownFields protoimpl.UnknownFields - - AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` - UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` - FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { @@ -922,14 +910,13 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { } type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted - Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs - SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage 
(0-100) + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { @@ -991,16 +978,15 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate( } type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count + FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested - Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count - FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally - FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds + sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { @@ -1077,217 +1063,114 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs( var File_supernode_status_proto protoreflect.FileDescriptor -var file_supernode_status_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x22, 0xca, 0x17, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 
0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, - 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, - 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, - 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, - 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, - 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0c, 0x75, 0x73, 
0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, - 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, - 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, - 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, - 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, - 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, - 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, - 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, - 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, - 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, - 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, - 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, - 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, - 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, - 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, - 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 
0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, - 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, - 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, - 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, - 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, - 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, - 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, - 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, - 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, - 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, - 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, - 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 
0x0a, - 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, - 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, - 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, - 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, - 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, - 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, - 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 
0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_supernode_status_proto_rawDesc = "" + + "\n" + + "\x16supernode/status.proto\x12\tsupernode\"?\n" + + "\rStatusRequest\x12.\n" + + "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\xca\x17\n" + + "\x0eStatusResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12%\n" + + "\x0euptime_seconds\x18\x02 \x01(\x04R\ruptimeSeconds\x12A\n" + + "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12/\n" + + "\x13registered_services\x18\x05 \x03(\tR\x12registeredServices\x12;\n" + + "\anetwork\x18\x06 \x01(\v2!.supernode.StatusResponse.NetworkR\anetwork\x12\x12\n" + + "\x04rank\x18\a \x01(\x05R\x04rank\x12\x1d\n" + + "\n" + + "ip_address\x18\b \x01(\tR\tipAddress\x12E\n" + + "\vp2p_metrics\x18\t \x01(\v2$.supernode.StatusResponse.P2PMetricsR\n" + + "p2pMetrics\x1a\x82\x05\n" + + "\tResources\x129\n" + + "\x03cpu\x18\x01 \x01(\v2'.supernode.StatusResponse.Resources.CPUR\x03cpu\x12B\n" + + "\x06memory\x18\x02 \x01(\v2*.supernode.StatusResponse.Resources.MemoryR\x06memory\x12T\n" + + "\x0fstorage_volumes\x18\x03 \x03(\v2+.supernode.StatusResponse.Resources.StorageR\x0estorageVolumes\x12)\n" + + "\x10hardware_summary\x18\x04 \x01(\tR\x0fhardwareSummary\x1a@\n" + + "\x03CPU\x12#\n" + + "\rusage_percent\x18\x01 \x01(\x01R\fusagePercent\x12\x14\n" + + "\x05cores\x18\x02 \x01(\x05R\x05cores\x1a\x84\x01\n" + + "\x06Memory\x12\x19\n" + + "\btotal_gb\x18\x01 \x01(\x01R\atotalGb\x12\x17\n" + + "\aused_gb\x18\x02 \x01(\x01R\x06usedGb\x12!\n" + + "\favailable_gb\x18\x03 \x01(\x01R\vavailableGb\x12#\n" + + "\rusage_percent\x18\x04 \x01(\x01R\fusagePercent\x1a\xab\x01\n" + + "\aStorage\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12\x1f\n" + + "\vtotal_bytes\x18\x02 \x01(\x04R\n" + + "totalBytes\x12\x1d\n" + + "\n" + + "used_bytes\x18\x03 \x01(\x04R\tusedBytes\x12'\n" + + "\x0favailable_bytes\x18\x04 \x01(\x04R\x0eavailableBytes\x12#\n" + + "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1aQ\n" + + "\aNetwork\x12\x1f\n" + + "\vpeers_count\x18\x01 \x01(\x05R\n" + + "peersCount\x12%\n" + + "\x0epeer_addresses\x18\x02 \x03(\tR\rpeerAddresses\x1a\xf3\x0e\n" + + "\n" + + "P2PMetrics\x12P\n" + + "\vdht_metrics\x18\x01 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DhtMetricsR\n" + + "dhtMetrics\x12t\n" + + "\x16network_handle_metrics\x18\x02 \x03(\v2>.supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntryR\x14networkHandleMetrics\x12e\n" + + "\x11conn_pool_metrics\x18\x03 \x03(\v29.supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntryR\x0fconnPoolMetrics\x12H\n" + + "\bban_list\x18\x04 \x03(\v2-.supernode.StatusResponse.P2PMetrics.BanEntryR\abanList\x12N\n" + + "\bdatabase\x18\x05 \x01(\v22.supernode.StatusResponse.P2PMetrics.DatabaseStatsR\bdatabase\x12C\n" + + "\x04disk\x18\x06 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DiskStatusR\x04disk\x1a\xc0\x05\n" + + "\n" + + "DhtMetrics\x12s\n" + + "\x14store_success_recent\x18\x01 \x03(\v2A.supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPointR\x12storeSuccessRecent\x12v\n" + + "\x15batch_retrieve_recent\x18\x02 
\x03(\v2B.supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePointR\x13batchRetrieveRecent\x121\n" + + "\x15hot_path_banned_skips\x18\x03 \x01(\x03R\x12hotPathBannedSkips\x125\n" + + "\x17hot_path_ban_increments\x18\x04 \x01(\x03R\x14hotPathBanIncrements\x1a\x8f\x01\n" + + "\x11StoreSuccessPoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x1a\n" + + "\brequests\x18\x02 \x01(\x05R\brequests\x12\x1e\n" + + "\n" + + "successful\x18\x03 \x01(\x05R\n" + + "successful\x12!\n" + + "\fsuccess_rate\x18\x04 \x01(\x01R\vsuccessRate\x1a\xc8\x01\n" + + "\x12BatchRetrievePoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x12\n" + + "\x04keys\x18\x02 \x01(\x05R\x04keys\x12\x1a\n" + + "\brequired\x18\x03 \x01(\x05R\brequired\x12\x1f\n" + + "\vfound_local\x18\x04 \x01(\x05R\n" + + "foundLocal\x12#\n" + + "\rfound_network\x18\x05 \x01(\x05R\ffoundNetwork\x12\x1f\n" + + "\vduration_ms\x18\x06 \x01(\x03R\n" + + "durationMs\x1at\n" + + "\x0eHandleCounters\x12\x14\n" + + "\x05total\x18\x01 \x01(\x03R\x05total\x12\x18\n" + + "\asuccess\x18\x02 \x01(\x03R\asuccess\x12\x18\n" + + "\afailure\x18\x03 \x01(\x03R\afailure\x12\x18\n" + + "\atimeout\x18\x04 \x01(\x03R\atimeout\x1a\x9d\x01\n" + + "\bBanEntry\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x0e\n" + + "\x02ip\x18\x02 \x01(\tR\x02ip\x12\x12\n" + + "\x04port\x18\x03 \x01(\rR\x04port\x12\x14\n" + + "\x05count\x18\x04 \x01(\x05R\x05count\x12&\n" + + "\x0fcreated_at_unix\x18\x05 \x01(\x03R\rcreatedAtUnix\x12\x1f\n" + + "\vage_seconds\x18\x06 \x01(\x03R\n" + + "ageSeconds\x1ae\n" + + "\rDatabaseStats\x12#\n" + + "\x0ep2p_db_size_mb\x18\x01 \x01(\x01R\vp2pDbSizeMb\x12/\n" + + "\x14p2p_db_records_count\x18\x02 \x01(\x03R\x11p2pDbRecordsCount\x1aU\n" + + "\n" + + "DiskStatus\x12\x15\n" + + "\x06all_mb\x18\x01 \x01(\x01R\x05allMb\x12\x17\n" + + "\aused_mb\x18\x02 \x01(\x01R\x06usedMb\x12\x17\n" + + "\afree_mb\x18\x03 \x01(\x01R\x06freeMb\x1a|\n" + + "\x19NetworkHandleMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + + "\x05value\x18\x02 \x01(\v23.supernode.StatusResponse.P2PMetrics.HandleCountersR\x05value:\x028\x01\x1aB\n" + + "\x14ConnPoolMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" var ( file_supernode_status_proto_rawDescOnce sync.Once - file_supernode_status_proto_rawDescData = file_supernode_status_proto_rawDesc + file_supernode_status_proto_rawDescData []byte ) func file_supernode_status_proto_rawDescGZIP() []byte { file_supernode_status_proto_rawDescOnce.Do(func() { - file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_status_proto_rawDescData) + file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc))) }) return file_supernode_status_proto_rawDescData } @@ -1344,7 +1227,7 @@ func file_supernode_status_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_status_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc)), NumEnums: 0, NumMessages: 17, NumExtensions: 0, @@ -1355,7 +1238,6 @@ func file_supernode_status_proto_init() { MessageInfos: file_supernode_status_proto_msgTypes, }.Build() File_supernode_status_proto = 
out.File - file_supernode_status_proto_rawDesc = nil file_supernode_status_proto_goTypes = nil file_supernode_status_proto_depIdxs = nil } diff --git a/go.mod b/go.mod index 1bbdf7bb..4b484c0c 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,6 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 @@ -114,6 +113,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect diff --git a/pkg/codec/codec_default_test.go b/pkg/codec/codec_default_test.go index cdd54aee..a55e605d 100644 --- a/pkg/codec/codec_default_test.go +++ b/pkg/codec/codec_default_test.go @@ -10,9 +10,9 @@ import ( // Constants: set InputPath and TaskID. BaseDir is the current directory. const ( - BaseDir = "/home/enxsys/Documents/Github/LumeraProtocol/supernode/release" - InputPath = "/home/enxsys/Documents/Github/LumeraProtocol/supernode/tests/system/900.zip" // set to an existing file path before running - TaskID = "rq-dirA" // both tests use the same directory + BaseDir = "" + InputPath = "" // set to an existing file path before running + TaskID = "rq-dirA" // both tests use the same directory ) // TestEncode_ToDirA encodes InputPath into BaseDir/TaskID using default settings. @@ -139,7 +139,7 @@ func TestCreateMetadata_SaveToFile(t *testing.T) { if err != nil { t.Fatalf("marshal metadata: %v", err) } - outPath := "/home/enxsys/Documents/Github/LumeraProtocol/supernode/pkg/codec" + ".layout.json" + outPath := " . " + ".layout.json" if err := os.WriteFile(outPath, data, 0o644); err != nil { t.Fatalf("write output: %v", err) } From 662f237be44ae0c59b7916fd0fe50fd44158ba93 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 9 Oct 2025 15:52:57 +0500 Subject: [PATCH 15/36] resolve do mod issues --- .github/workflows/build&release.yml | 8 +++ .github/workflows/tests.yml | 4 +- sn-manager/go.mod | 12 ++--- sn-manager/go.sum | 78 ++++------------------------- 4 files changed, 26 insertions(+), 76 deletions(-) diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index 3dbf21bf..b3faf2c4 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -30,11 +30,15 @@ jobs: - name: Build binaries run: | + # Ensure module metadata is up to date + go mod tidy # Build supernode CGO_ENABLED=1 go build -trimpath -o /tmp/supernode ./supernode # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 go build -trimpath -o /tmp/sn-manager . 
echo "✅ Build successful" @@ -86,6 +90,8 @@ jobs: DD_API_KEY: ${{ secrets.DD_API_KEY }} DD_SITE: ${{ secrets.DD_SITE }} run: | + # Ensure module metadata is up to date + go mod tidy mkdir -p release # Build supernode @@ -105,6 +111,8 @@ jobs: # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bad0f6ee..35081f81 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,6 +17,8 @@ jobs: uses: actions/checkout@v4 - name: Setup Go and system deps uses: ./.github/actions/setup-env + - name: Go mod tidy + run: go mod tidy - name: Run unit tests run: go test $(go list ./... | grep -v '/tests') -v @@ -75,4 +77,4 @@ jobs: # run: make setup-supernodes # - name: Run sn-manager e2e tests - # run: make test-sn-manager \ No newline at end of file + # run: make test-sn-manager diff --git a/sn-manager/go.mod b/sn-manager/go.mod index 1beee097..8d29e8e6 100644 --- a/sn-manager/go.mod +++ b/sn-manager/go.mod @@ -3,15 +3,14 @@ module github.com/LumeraProtocol/supernode/v2/sn-manager go 1.24.1 require ( - github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 - github.com/spf13/cobra v1.8.1 - gopkg.in/yaml.v3 v3.0.1 + github.com/AlecAivazis/survey/v2 v2.3.7 + github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 + github.com/spf13/cobra v1.8.1 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/golang/protobuf v1.5.4 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -24,7 +23,6 @@ require ( golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/grpc v1.71.0 // indirect diff --git a/sn-manager/go.sum b/sn-manager/go.sum index 51f96134..6413ef48 100644 --- a/sn-manager/go.sum +++ b/sn-manager/go.sum @@ -1,14 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -16,37 +9,28 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -59,8 +43,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -85,44 +69,22 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -142,40 +104,20 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 92e557a8a4f545ef40f64a47a993bec3aaf0863c Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 10 Oct 2025 06:39:56 +0500 Subject: [PATCH 16/36] tighten stable release check --- sn-manager/internal/github/client.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sn-manager/internal/github/client.go b/sn-manager/internal/github/client.go index 70e99d6a..721a02e1 100644 --- a/sn-manager/internal/github/client.go +++ b/sn-manager/internal/github/client.go @@ -6,6 +6,7 @@ import ( "io" "log" "net/http" + "strings" "time" ) @@ -127,7 +128,7 @@ func (c *Client) ListReleases() ([]*Release, error) { func (c *Client) GetLatestStableRelease() (*Release, error) { // Try the latest release endpoint first (single API call) release, err := c.GetLatestRelease() - if err == nil && !release.Draft && !release.Prerelease { + if err == nil && !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } @@ -139,7 +140,7 @@ func (c *Client) GetLatestStableRelease() (*Release, error) { // Filter for stable releases (not draft, not prerelease) for _, release := range releases { - if !release.Draft && !release.Prerelease { + if !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } } From 1184a54527038a4d3832d05f99680406f82b9ea9 Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:25:25 +0500 Subject: [PATCH 17/36] Task tracking + Supernode deterministic selection In SDK (#203) * cleanup * supernode task tracking * deterministic supernode selection in sdk * use v1.7.2 lumera in test --- Makefile | 2 +- gen/supernode/service.swagger.json | 27 ++ gen/supernode/status.pb.go | 242 ++++++++++++------ pkg/common/blocktracker/block_tracker.go | 121 --------- pkg/common/blocktracker/block_tracker_test.go | 97 ------- pkg/common/task/action.go | 20 -- pkg/common/task/state/state.go | 174 ------------- pkg/common/task/state/status.go | 34 --- pkg/common/task/task.go | 143 ----------- pkg/common/task/ticket.go | 13 - pkg/common/task/worker.go | 144 ----------- pkg/common/task/worker_test.go | 147 ----------- pkg/task/handle.go | 65 +++++ pkg/task/task.go | 82 ++++++ pkg/task/task_test.go | 161 ++++++++++++ proto/supernode/status.proto | 8 + sdk/task/cascade.go | 7 + sdk/task/download.go | 18 ++ sdk/task/helpers.go | 47 ++++ sdk/task/task.go | 18 +- sn-manager/internal/updater/updater.go | 76 +++--- supernode/status/service.go | 12 + supernode/transport/grpc/cascade/handler.go | 18 ++ 23 files changed, 665 insertions(+), 1011 deletions(-) delete mode 100644 pkg/common/blocktracker/block_tracker.go delete mode 100644 pkg/common/blocktracker/block_tracker_test.go delete mode 100644 pkg/common/task/action.go delete mode 100644 pkg/common/task/state/state.go delete mode 100644 pkg/common/task/state/status.go delete mode 100644 pkg/common/task/task.go delete mode 100644 pkg/common/task/ticket.go delete mode 100644 pkg/common/task/worker.go delete mode 100644 pkg/common/task/worker_test.go create mode 100644 pkg/task/handle.go create mode 100644 pkg/task/task.go create mode 100644 pkg/task/task_test.go diff --git a/Makefile b/Makefile index 
2d300b9b..e7c44da4 100644 --- a/Makefile +++ b/Makefile @@ -114,7 +114,7 @@ SETUP_SCRIPT=tests/scripts/setup-supernodes.sh # Optional: specify lumera binary path to skip download LUMERAD_BINARY ?= # Optional: specify installation mode (latest-release, latest-tag, or vX.Y.Z) -INSTALL_MODE ?=latest-tag +INSTALL_MODE ?=v1.7.2 install-lumera: @echo "Installing Lumera..." diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index c3944e9d..523499b8 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -744,6 +744,25 @@ }, "title": "System resource information" }, + "StatusResponseServiceTasks": { + "type": "object", + "properties": { + "serviceName": { + "type": "string" + }, + "taskIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "taskCount": { + "type": "integer", + "format": "int32" + } + }, + "title": "ServiceTasks contains task information for a specific service" + }, "protobufAny": { "type": "object", "properties": { @@ -827,6 +846,14 @@ "resources": { "$ref": "#/definitions/StatusResponseResources" }, + "runningTasks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/StatusResponseServiceTasks" + }, + "title": "Services with currently running tasks" + }, "registeredServices": { "type": "array", "items": { diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index a659d729..74e0d6d7 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -7,11 +7,12 @@ package supernode import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -70,15 +71,16 @@ func (x *StatusRequest) GetIncludeP2PMetrics() bool { // The StatusResponse represents system status with clear organization type StatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version - UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds - Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` - RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services - Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information - Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) - IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version + UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds + Resources 
*StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks + RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services + Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information + Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) + IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -134,6 +136,13 @@ func (x *StatusResponse) GetResources() *StatusResponse_Resources { return nil } +func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { + if x != nil { + return x.RunningTasks + } + return nil +} + func (x *StatusResponse) GetRegisteredServices() []string { if x != nil { return x.RegisteredServices @@ -238,6 +247,67 @@ func (x *StatusResponse_Resources) GetHardwareSummary() string { return "" } +// ServiceTasks contains task information for a specific service +type StatusResponse_ServiceTasks struct { + state protoimpl.MessageState `protogen:"open.v1"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` + TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_ServiceTasks) Reset() { + *x = StatusResponse_ServiceTasks{} + mi := &file_supernode_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_ServiceTasks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_ServiceTasks) ProtoMessage() {} + +func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *StatusResponse_ServiceTasks) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { + if x != nil { + return x.TaskIds + } + return nil +} + +func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { + if x != nil { + return x.TaskCount + } + return 0 +} + // Network information type StatusResponse_Network struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -249,7 +319,7 @@ type StatusResponse_Network struct { func (x *StatusResponse_Network) Reset() { *x = StatusResponse_Network{} - mi := &file_supernode_status_proto_msgTypes[3] + mi := &file_supernode_status_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -261,7 +331,7 @@ func (x *StatusResponse_Network) String() string { func (*StatusResponse_Network) ProtoMessage() {} func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[3] + mi := &file_supernode_status_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -274,7 +344,7 @@ func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. func (*StatusResponse_Network) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} } func (x *StatusResponse_Network) GetPeersCount() int32 { @@ -306,7 +376,7 @@ type StatusResponse_P2PMetrics struct { func (x *StatusResponse_P2PMetrics) Reset() { *x = StatusResponse_P2PMetrics{} - mi := &file_supernode_status_proto_msgTypes[4] + mi := &file_supernode_status_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -318,7 +388,7 @@ func (x *StatusResponse_P2PMetrics) String() string { func (*StatusResponse_P2PMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[4] + mi := &file_supernode_status_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -331,7 +401,7 @@ func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3} } func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { @@ -386,7 +456,7 @@ type StatusResponse_Resources_CPU struct { func (x *StatusResponse_Resources_CPU) Reset() { *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_status_proto_msgTypes[5] + mi := &file_supernode_status_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -398,7 +468,7 @@ func (x *StatusResponse_Resources_CPU) String() string { func (*StatusResponse_Resources_CPU) ProtoMessage() {} func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[5] + mi := &file_supernode_status_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -440,7 +510,7 @@ type StatusResponse_Resources_Memory struct { func (x *StatusResponse_Resources_Memory) Reset() { *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_status_proto_msgTypes[6] + mi := &file_supernode_status_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -452,7 +522,7 @@ func (x *StatusResponse_Resources_Memory) String() string { func (*StatusResponse_Resources_Memory) ProtoMessage() {} func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[6] + mi := &file_supernode_status_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -509,7 +579,7 @@ type StatusResponse_Resources_Storage struct { func (x *StatusResponse_Resources_Storage) Reset() { *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_status_proto_msgTypes[7] + mi := &file_supernode_status_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -521,7 +591,7 @@ func (x *StatusResponse_Resources_Storage) String() string { func (*StatusResponse_Resources_Storage) ProtoMessage() {} func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[7] + mi := &file_supernode_status_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -585,7 +655,7 @@ type StatusResponse_P2PMetrics_DhtMetrics struct { func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics{} - mi := &file_supernode_status_proto_msgTypes[8] + mi := &file_supernode_status_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -597,7 +667,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[8] + mi := &file_supernode_status_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -610,7 +680,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Messa // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0} } func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { @@ -654,7 +724,7 @@ type StatusResponse_P2PMetrics_HandleCounters struct { func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { *x = StatusResponse_P2PMetrics_HandleCounters{} - mi := &file_supernode_status_proto_msgTypes[9] + mi := &file_supernode_status_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -666,7 +736,7 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[9] + mi := &file_supernode_status_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -679,7 +749,7 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.M // Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 1} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 1} } func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { @@ -725,7 +795,7 @@ type StatusResponse_P2PMetrics_BanEntry struct { func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { *x = StatusResponse_P2PMetrics_BanEntry{} - mi := &file_supernode_status_proto_msgTypes[10] + mi := &file_supernode_status_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -737,7 +807,7 @@ func (x *StatusResponse_P2PMetrics_BanEntry) String() string { func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[10] + mi := &file_supernode_status_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -750,7 +820,7 @@ func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message // Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 2} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 2} } func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { @@ -806,7 +876,7 @@ type StatusResponse_P2PMetrics_DatabaseStats struct { func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { *x = StatusResponse_P2PMetrics_DatabaseStats{} - mi := &file_supernode_status_proto_msgTypes[11] + mi := &file_supernode_status_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -818,7 +888,7 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[11] + mi := &file_supernode_status_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -831,7 +901,7 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Me // Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 3} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 3} } func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { @@ -860,7 +930,7 @@ type StatusResponse_P2PMetrics_DiskStatus struct { func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { *x = StatusResponse_P2PMetrics_DiskStatus{} - mi := &file_supernode_status_proto_msgTypes[12] + mi := &file_supernode_status_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -872,7 +942,7 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[12] + mi := &file_supernode_status_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -885,7 +955,7 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Messa // Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 4} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 4} } func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { @@ -921,7 +991,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_status_proto_msgTypes[15] + mi := &file_supernode_status_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -933,7 +1003,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[15] + mi := &file_supernode_status_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -946,7 +1016,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0, 0} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 0} } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { @@ -991,7 +1061,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_status_proto_msgTypes[16] + mi := &file_supernode_status_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1003,7 +1073,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() strin func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[16] + mi := &file_supernode_status_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1016,7 +1086,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() // Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. 
func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { - return file_supernode_status_proto_rawDescGZIP(), []int{1, 2, 0, 1} + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 1} } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { @@ -1067,11 +1137,12 @@ const file_supernode_status_proto_rawDesc = "" + "\n" + "\x16supernode/status.proto\x12\tsupernode\"?\n" + "\rStatusRequest\x12.\n" + - "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\xca\x17\n" + + "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\x84\x19\n" + "\x0eStatusResponse\x12\x18\n" + "\aversion\x18\x01 \x01(\tR\aversion\x12%\n" + "\x0euptime_seconds\x18\x02 \x01(\x04R\ruptimeSeconds\x12A\n" + - "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12/\n" + + "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12K\n" + + "\rrunning_tasks\x18\x04 \x03(\v2&.supernode.StatusResponse.ServiceTasksR\frunningTasks\x12/\n" + "\x13registered_services\x18\x05 \x03(\tR\x12registeredServices\x12;\n" + "\anetwork\x18\x06 \x01(\v2!.supernode.StatusResponse.NetworkR\anetwork\x12\x12\n" + "\x04rank\x18\a \x01(\x05R\x04rank\x12\x1d\n" + @@ -1099,7 +1170,12 @@ const file_supernode_status_proto_rawDesc = "" + "\n" + "used_bytes\x18\x03 \x01(\x04R\tusedBytes\x12'\n" + "\x0favailable_bytes\x18\x04 \x01(\x04R\x0eavailableBytes\x12#\n" + - "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1aQ\n" + + "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1ak\n" + + "\fServiceTasks\x12!\n" + + "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x19\n" + + "\btask_ids\x18\x02 \x03(\tR\ataskIds\x12\x1d\n" + + "\n" + + "task_count\x18\x03 \x01(\x05R\ttaskCount\x1aQ\n" + "\aNetwork\x12\x1f\n" + "\vpeers_count\x18\x01 \x01(\x05R\n" + "peersCount\x12%\n" + @@ -1175,47 +1251,49 @@ func file_supernode_status_proto_rawDescGZIP() []byte { return file_supernode_status_proto_rawDescData } -var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_supernode_status_proto_goTypes = []any{ (*StatusRequest)(nil), // 0: supernode.StatusRequest (*StatusResponse)(nil), // 1: supernode.StatusResponse (*StatusResponse_Resources)(nil), // 2: supernode.StatusResponse.Resources - (*StatusResponse_Network)(nil), // 3: supernode.StatusResponse.Network - (*StatusResponse_P2PMetrics)(nil), // 4: supernode.StatusResponse.P2PMetrics - (*StatusResponse_Resources_CPU)(nil), // 5: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 6: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 7: supernode.StatusResponse.Resources.Storage - (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics.DhtMetrics - (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 9: supernode.StatusResponse.P2PMetrics.HandleCounters - (*StatusResponse_P2PMetrics_BanEntry)(nil), // 10: supernode.StatusResponse.P2PMetrics.BanEntry - (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 11: supernode.StatusResponse.P2PMetrics.DatabaseStats - (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 12: supernode.StatusResponse.P2PMetrics.DiskStatus - nil, // 13: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - nil, // 14: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 
(*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 15: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks + (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network + (*StatusResponse_P2PMetrics)(nil), // 5: supernode.StatusResponse.P2PMetrics + (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats + (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint } var file_supernode_status_proto_depIdxs = []int32{ 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources - 3, // 1: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network - 4, // 2: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics - 5, // 3: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 6, // 4: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 7, // 5: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 8, // 6: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics - 13, // 7: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - 14, // 8: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 10, // 9: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry - 11, // 10: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats - 12, // 11: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 15, // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 9, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> 
supernode.StatusResponse.P2PMetrics.HandleCounters - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks + 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network + 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics + 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage + 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_supernode_status_proto_init() } @@ -1229,7 +1307,7 @@ func file_supernode_status_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc)), NumEnums: 0, - NumMessages: 17, + NumMessages: 18, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go deleted file mode 100644 index 00f8c512..00000000 --- a/pkg/common/blocktracker/block_tracker.go +++ /dev/null @@ -1,121 +0,0 @@ -package blocktracker - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -const ( - defaultRetries = 3 - defaultDelayDurationBetweenRetries = 5 * time.Second - defaultRPCConnectTimeout = 15 * time.Second - // Update duration in case last update was success - defaultSuccessUpdateDuration = 10 * time.Second - // Update duration in case last update was failed - prevent too much call to Lumera - defaultFailedUpdateDuration = 5 * time.Second - 
defaultNextBlockTimeout = 30 * time.Minute -) - -// LumeraClient defines interface functions BlockCntTracker expects from Lumera -type LumeraClient interface { - // GetBlockCount returns block height of blockchain - GetBlockCount(ctx context.Context) (int32, error) -} - -// BlockCntTracker defines a block tracker - that will keep current block height -type BlockCntTracker struct { - mtx sync.Mutex - LumeraClient LumeraClient - curBlockCnt int32 - lastSuccess time.Time - lastRetried time.Time - lastErr error - delayBetweenRetries time.Duration - retries int -} - -// New returns an instance of BlockCntTracker -func New(LumeraClient LumeraClient) *BlockCntTracker { - return &BlockCntTracker{ - LumeraClient: LumeraClient, - curBlockCnt: 0, - delayBetweenRetries: defaultDelayDurationBetweenRetries, - retries: defaultRetries, - } -} - -func (tracker *BlockCntTracker) refreshBlockCount(retries int) { - tracker.lastRetried = time.Now().UTC() - for i := 0; i < retries; i = i + 1 { - ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout) - blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx) - if err == nil { - tracker.curBlockCnt = blockCnt - tracker.lastSuccess = time.Now().UTC() - cancel() - tracker.lastErr = nil - return - } - cancel() - - tracker.lastErr = err - // delay between retries - time.Sleep(tracker.delayBetweenRetries) - } - -} - -// GetBlockCount return current block count -// it will get from cache if last refresh is small than defaultSuccessUpdateDuration -// or will refresh it by call from Lumera daemon to get the latest one if defaultSuccessUpdateDuration expired -func (tracker *BlockCntTracker) GetBlockCount() (int32, error) { - tracker.mtx.Lock() - defer tracker.mtx.Unlock() - - shouldRefresh := false - - if tracker.lastSuccess.After(tracker.lastRetried) { - if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) { - shouldRefresh = true - } - } else { - // prevent update too much - if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) { - shouldRefresh = true - } - } - - if shouldRefresh { - tracker.refreshBlockCount(tracker.retries) - } - - if tracker.curBlockCnt == 0 { - return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr) - } - - return tracker.curBlockCnt, nil -} - -// WaitTillNextBlock will wait until next block height is greater than blockCnt -func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error { - for { - select { - case <-ctx.Done(): - return errors.Errorf("context done: %w", ctx.Err()) - case <-time.After(defaultNextBlockTimeout): - return errors.Errorf("timeout waiting for next block") - case <-time.After(defaultSuccessUpdateDuration): - curBlockCnt, err := tracker.GetBlockCount() - if err != nil { - return errors.Errorf("failed to get blockcount: %w", err) - } - - if curBlockCnt > blockCnt { - return nil - } - } - } -} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go deleted file mode 100644 index b070a4b7..00000000 --- a/pkg/common/blocktracker/block_tracker_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package blocktracker - -import ( - "context" - "errors" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type fakePastelClient struct { - retBlockCnt int32 - retErr error -} - -func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { - return fake.retBlockCnt, fake.retErr -} - -func TestGetCountFirstTime(t *testing.T) { - 
tests := []struct { - name string - pastelClient *fakePastelClient - expectErr bool - }{ - { - name: "success", - pastelClient: &fakePastelClient{ - retBlockCnt: 10, - retErr: nil, - }, - expectErr: false, - }, - { - name: "fail", - pastelClient: &fakePastelClient{ - retBlockCnt: 0, - retErr: errors.New("error"), - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tracker := New(tt.pastelClient) - tracker.retries = 1 - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) - if tt.expectErr { - assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) - } else { - assert.Nil(t, err) - } - }) - } -} - -func TestGetBlockCountNoRefresh(t *testing.T) { - pastelClient := &fakePastelClient{ - retBlockCnt: 10, - retErr: errors.New("error"), - } - - expectedBlk := int32(1) - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = expectedBlk - tracker.lastRetried = time.Now().UTC() - tracker.lastSuccess = time.Now().UTC() - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} - -func TestGetBlockCountRefresh(t *testing.T) { - expectedBlk := int32(10) - pastelClient := &fakePastelClient{ - retBlockCnt: expectedBlk, - retErr: nil, - } - - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = 1 - tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go deleted file mode 100644 index 227ebe35..00000000 --- a/pkg/common/task/action.go +++ /dev/null @@ -1,20 +0,0 @@ -package task - -import "context" - -// ActionFn represents a function that is run inside a goroutine. -type ActionFn func(ctx context.Context) error - -// Action represents the action of the task. -type Action struct { - fn ActionFn - doneCh chan struct{} -} - -// NewAction returns a new Action instance. -func NewAction(fn ActionFn) *Action { - return &Action{ - fn: fn, - doneCh: make(chan struct{}), - } -} diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go deleted file mode 100644 index 05179a85..00000000 --- a/pkg/common/task/state/state.go +++ /dev/null @@ -1,174 +0,0 @@ -//go:generate mockery --name=State - -package state - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" - "github.com/LumeraProtocol/supernode/v2/pkg/types" -) - -// State represents a state of the task. -type State interface { - // Status returns the current status. - Status() *Status - - // SetStatusNotifyFunc sets a function to be called after the state is updated. - SetStatusNotifyFunc(fn func(status *Status)) - - // RequiredStatus returns an error if the current status doen't match the given one. - RequiredStatus(subStatus SubStatus) error - - // StatusHistory returns all history from the very beginning. - StatusHistory() []*Status - - // UpdateStatus updates the status of the state by creating a new status with the given `status`. - UpdateStatus(subStatus SubStatus) - - // SubscribeStatus returns a new subscription of the state. 
- SubscribeStatus() func() <-chan *Status - - //SetStateLog set the wallet node task status log to the state status log - SetStateLog(statusLog types.Fields) - - //InitialiseHistoryDB sets the connection to historyDB - InitialiseHistoryDB(store queries.LocalStoreInterface) -} - -type state struct { - status *Status - history []*Status - - notifyFn func(status *Status) - sync.RWMutex - subsCh []chan *Status - taskID string - statusLog types.Fields - historyDBStore queries.LocalStoreInterface -} - -// Status implements State.Status() -func (state *state) Status() *Status { - return state.status -} - -// SetStatusNotifyFunc implements State.SetStatusNotifyFunc() -func (state *state) SetStatusNotifyFunc(fn func(status *Status)) { - state.notifyFn = fn -} - -// RequiredStatus implements State.RequiredStatus() -func (state *state) RequiredStatus(subStatus SubStatus) error { - if state.status.Is(subStatus) { - return nil - } - return errors.Errorf("required status %q, current %q", subStatus, state.status) -} - -// StatusHistory implements State.StatusHistory() -func (state *state) StatusHistory() []*Status { - state.RLock() - defer state.RUnlock() - - return append(state.history, state.status) -} - -// UpdateStatus implements State.UpdateStatus() -func (state *state) UpdateStatus(subStatus SubStatus) { - state.Lock() - defer state.Unlock() - - status := NewStatus(subStatus) - state.history = append(state.history, state.status) - state.status = status - - history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()} - if state.statusLog.IsValid() { - history.Details = types.NewDetails(status.String(), state.statusLog) - } - - if state.historyDBStore != nil { - if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } else { - store, err := queries.OpenHistoryDB() - if err != nil { - logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - if _, err := store.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - } - - if state.notifyFn != nil { - state.notifyFn(status) - } - - for _, subCh := range state.subsCh { - subCh := subCh - go func() { - subCh <- status - }() - } -} - -// SubscribeStatus implements State.SubscribeStatus() -func (state *state) SubscribeStatus() func() <-chan *Status { - state.RLock() - defer state.RUnlock() - - subCh := make(chan *Status) - state.subsCh = append(state.subsCh, subCh) - - for _, status := range append(state.history, state.status) { - status := status - go func() { - subCh <- status - }() - } - - sub := func() <-chan *Status { - return subCh - } - return sub -} - -func (state *state) SetStateLog(statusLog types.Fields) { - state.statusLog = statusLog -} - -func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) { - state.historyDBStore = storeInterface -} - -// New returns a new state instance. 
-func New(subStatus SubStatus, taskID string) State { - store, err := queries.OpenHistoryDB() - if err != nil { - logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - - if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID, - Status: subStatus.String()}); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - - return &state{ - status: NewStatus(subStatus), - taskID: taskID, - } -} diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go deleted file mode 100644 index b1b00da6..00000000 --- a/pkg/common/task/state/status.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:generate mockery --name=SubStatus - -package state - -import ( - "fmt" - "time" -) - -// SubStatus represents a sub-status that contains a description of the status. -type SubStatus interface { - fmt.Stringer - IsFinal() bool - IsFailure() bool -} - -// Status represents a state of the task. -type Status struct { - CreatedAt time.Time - SubStatus -} - -// Is returns true if the current `Status` matches to the given `statuses`. -func (status *Status) Is(subStatus SubStatus) bool { - return status.SubStatus == subStatus -} - -// NewStatus returns a new Status instance. -func NewStatus(subStatus SubStatus) *Status { - return &Status{ - CreatedAt: time.Now().UTC(), - SubStatus: subStatus, - } -} diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go deleted file mode 100644 index adf173e4..00000000 --- a/pkg/common/task/task.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:generate mockery --name=Task - -package task - -import ( - "context" - "sync" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" -) - -// Task represent a worker task. -type Task interface { - state.State - - // ID returns id of the task. - ID() string - - // Run starts the task. - Run(ctx context.Context) error - - // Cancel tells a task to abandon its work. - // Cancel may be called by multiple goroutines simultaneously. - // After the first call, subsequent calls to a Cancel do nothing. - Cancel() - - // Done returns a channel when the task is canceled. - Done() <-chan struct{} - - // RunAction waits for new actions, starts handling each of them in a new goroutine. - RunAction(ctx context.Context) error - - // NewAction creates a new action and passes for the execution. - // It is used when it is necessary to run an action in the context of `Tasks` rather than the one who was called. 
- NewAction(fn ActionFn) <-chan struct{} - - // CloseActionCh closes action ch - CloseActionCh() -} - -type task struct { - state.State - - id string - - actionCh chan *Action - - doneMu sync.Mutex - doneCh chan struct{} - closeOnce sync.Once -} - -// ID implements Task.ID -func (task *task) ID() string { - return task.id -} - -// Run implements Task.Run -func (task *task) Run(_ context.Context) error { - return errors.New("task default run func not implemented") -} - -// Cancel implements Task.Cancel -func (task *task) Cancel() { - task.doneMu.Lock() - defer task.doneMu.Unlock() - - select { - case <-task.Done(): - logtrace.Debug(context.Background(), "task cancelled", logtrace.Fields{"task_id": task.ID()}) - return - default: - close(task.doneCh) - } -} - -// Done implements Task.Done -func (task *task) Done() <-chan struct{} { - return task.doneCh -} - -// RunAction implements Task.RunAction -func (task *task) RunAction(ctx context.Context) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - group, ctx := errgroup.WithContext(ctx) - for { - select { - case <-ctx.Done(): - logtrace.Debug(ctx, "context done", logtrace.Fields{"task_id": task.ID()}) - case <-task.Done(): - logtrace.Debug(ctx, "task done", logtrace.Fields{"task_id": task.ID()}) - cancel() - case action, ok := <-task.actionCh: - if !ok { - logtrace.Debug(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()}) - return group.Wait() - } - - currAction := action - group.Go(func() error { - defer close(currAction.doneCh) - - return currAction.fn(ctx) - }) - continue - } - break - } - - return group.Wait() -} - -// CloseActionCh safely closes the action channel -func (task *task) CloseActionCh() { - task.closeOnce.Do(func() { - close(task.actionCh) - }) -} - -// NewAction implements Task.NewAction -func (task *task) NewAction(fn ActionFn) <-chan struct{} { - act := NewAction(fn) - task.actionCh <- act - return act.doneCh -} - -// New returns a new task instance. -func New(status state.SubStatus) Task { - taskID, _ := random.String(8, random.Base62Chars) - - return &task{ - State: state.New(status, taskID), - id: taskID, - doneCh: make(chan struct{}), - actionCh: make(chan *Action), - } -} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go deleted file mode 100644 index 561b8f0b..00000000 --- a/pkg/common/task/ticket.go +++ /dev/null @@ -1,13 +0,0 @@ -package task - -type CascadeTicket struct { - Creator string `json:"creator"` - CreatorSignature []byte `json:"creator_signature"` - DataHash string `json:"data_hash"` - ActionID string `json:"action_id"` - BlockHeight int64 `json:"block_height"` - BlockHash []byte `json:"block_hash"` - RQIDsIC uint32 `json:"rqids_ic"` - RQIDsMax int32 `json:"rqids_max"` - RQIDs []string `json:"rq_ids"` -} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go deleted file mode 100644 index 14043079..00000000 --- a/pkg/common/task/worker.go +++ /dev/null @@ -1,144 +0,0 @@ -package task - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// Worker represents a pool of the task. -type Worker struct { - sync.Mutex - - tasks []Task - taskCh chan Task -} - -// Tasks returns all tasks. 
-func (worker *Worker) Tasks() []Task { - worker.Lock() - defer worker.Unlock() - - // return a shallow copy to avoid data races - copied := make([]Task, len(worker.tasks)) - copy(copied, worker.tasks) - return copied -} - -// Task returns the task by the given id. -func (worker *Worker) Task(taskID string) Task { - worker.Lock() - defer worker.Unlock() - - for _, task := range worker.tasks { - if task.ID() == taskID { - return task - } - } - return nil -} - -// AddTask adds the new task. -func (worker *Worker) AddTask(task Task) { - worker.Lock() - defer worker.Unlock() - - worker.tasks = append(worker.tasks, task) - worker.taskCh <- task - - // Proactively remove the task once it's done to prevent lingering entries - go func(t Task) { - <-t.Done() - // remove promptly when the task signals completion/cancelation - worker.RemoveTask(t) - }(task) -} - -// RemoveTask removes the task. -func (worker *Worker) RemoveTask(subTask Task) { - worker.Lock() - defer worker.Unlock() - - for i, task := range worker.tasks { - if task == subTask { - worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...) - return - } - } -} - -// Run waits for new tasks, starts handling each of them in a new goroutine. -func (worker *Worker) Run(ctx context.Context) error { - group, _ := errgroup.WithContext(ctx) // Create an error group but ignore the derived context - // Background sweeper to prune finalized tasks that might linger - // even if the task's Run wasn't executed to completion. - sweeperCtx, sweeperCancel := context.WithCancel(ctx) - defer sweeperCancel() - go worker.cleanupLoop(sweeperCtx) - for { - select { - case <-ctx.Done(): - logtrace.Warn(ctx, "Worker run stopping", logtrace.Fields{logtrace.FieldError: ctx.Err().Error()}) - return group.Wait() - case t := <-worker.taskCh: // Rename here - currentTask := t // Capture the loop variable - group.Go(func() error { - defer func() { - if r := recover(); r != nil { - logtrace.Error(ctx, "Recovered from panic in common task's worker run", logtrace.Fields{"task": currentTask.ID(), "error": r}) - } - - logtrace.Debug(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()}) - // Remove the task from the worker's task list - worker.RemoveTask(currentTask) - }() - - return currentTask.Run(ctx) // Use the captured variable - }) - } - } -} - -// NewWorker returns a new Worker instance. 
-func NewWorker() *Worker { - w := &Worker{taskCh: make(chan Task)} - return w -} - -// cleanupLoop periodically removes tasks that are in a final state for a grace period -func (worker *Worker) cleanupLoop(ctx context.Context) { - const ( - cleanupInterval = 30 * time.Second - finalTaskTTL = 2 * time.Minute - ) - - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - now := time.Now() - worker.Lock() - // iterate and compact in-place - kept := worker.tasks[:0] - for _, t := range worker.tasks { - st := t.Status() - if st != nil && st.SubStatus != nil && st.SubStatus.IsFinal() { - if now.Sub(st.CreatedAt) >= finalTaskTTL { - // drop this finalized task - continue - } - } - kept = append(kept, t) - } - worker.tasks = kept - worker.Unlock() - } - } -} diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go deleted file mode 100644 index 4c5f21ac..00000000 --- a/pkg/common/task/worker_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package task - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWorkerTasks(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - tests := []struct { - name string - fields fields - want []Task - }{ - { - name: "retrieve tasks", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - want: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Tasks()) - }) - } -} - -func TestWorkerTask(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - type args struct { - taskID string - } - tests := []struct { - name string - fields fields - args args - want Task - }{ - { - name: "get task with id 1", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"2"}, - want: &task{id: "2"}, - }, - { - name: "get not exist task", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"3"}, - want: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Task(tt.args.taskID)) - }) - } -} - -func TestWorkerAddTask(t *testing.T) { - t.Parallel() - - type args struct { - task Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "add task", - args: args{&task{id: "1"}}, - want: []Task{&task{id: "1"}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - taskCh: make(chan Task), - } - - go func() { - worker.AddTask(tt.args.task) - }() - - <-worker.taskCh - tasks := worker.tasks - assert.Equal(t, tt.want, tasks) - - }) - } -} - -func TestWorkerRemoveTask(t *testing.T) { - t.Parallel() - - type args struct { - subTask Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "removed task", - args: args{&task{id: "1"}}, - want: []Task{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: []Task{tt.args.subTask}, - } - - worker.RemoveTask(tt.args.subTask) - assert.Equal(t, tt.want, worker.tasks) - }) - } -} diff --git a/pkg/task/handle.go b/pkg/task/handle.go new file mode 100644 index 00000000..33359e38 --- /dev/null +++ b/pkg/task/handle.go @@ -0,0 +1,65 @@ +package task + +import ( + "context" + "sync" + "time" 
+ + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// Handle manages a running task with an optional watchdog. +// It ensures Start and End are paired, logs start/end, and auto-ends on timeout. +type Handle struct { + service string + id string + stop chan struct{} + once sync.Once +} + +// Start starts tracking a task and returns a Handle that will ensure the +// task is ended. A watchdog is started to auto-end the task after timeout +// to avoid indefinitely stuck running tasks in status reporting. +func Start(ctx context.Context, service, id string, timeout time.Duration) *Handle { + if service == "" || id == "" { + return &Handle{} + } + Default.Start(service, id) + logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) + + g := &Handle{service: service, id: id, stop: make(chan struct{})} + if timeout > 0 { + go func() { + select { + case <-time.After(timeout): + // Auto-end if not already ended + g.endWith(ctx, true) + case <-g.stop: + // normal completion + } + }() + } + return g +} + +// End stops tracking the task. Safe to call multiple times. +func (g *Handle) End(ctx context.Context) { + g.endWith(ctx, false) +} + +// EndWith ends the guard and logs accordingly. If expired is true, +// it emits a warning and ends the task to avoid stuck status. +func (g *Handle) endWith(ctx context.Context, expired bool) { + if g == nil || g.service == "" || g.id == "" { + return + } + g.once.Do(func() { + close(g.stop) + Default.End(g.service, g.id) + if expired { + logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) + } else { + logtrace.Info(ctx, "task: ended", logtrace.Fields{"service": g.service, "task_id": g.id}) + } + }) +} diff --git a/pkg/task/task.go b/pkg/task/task.go new file mode 100644 index 00000000..6bf50e78 --- /dev/null +++ b/pkg/task/task.go @@ -0,0 +1,82 @@ +// Package task provides a lean, concurrency-safe, in-memory tracker for +// live tasks running inside a service. It is designed to be generic and +// reusable across multiple features (e.g., cascade upload/download) and +// only tracks tasks while the enclosing RPC/handler is alive. No +// persistence, progress reporting, or background processing is included. +package task + +import "sync" + +// Tracker defines a minimal interface for tracking live tasks per service. +// Implementations must be concurrency-safe. All methods are non-blocking +// and best-effort; invalid inputs are ignored. +type Tracker interface { + Start(service, taskID string) + End(service, taskID string) + Snapshot() map[string][]string +} + +// InMemoryTracker is a lean, concurrency-safe tracker of live tasks. +// It stores only in-memory state for the lifetime of the process and +// returns copies when asked for a snapshot to ensure isolation. +type InMemoryTracker struct { + mu sync.RWMutex + // service -> set(taskID) + data map[string]map[string]struct{} +} + +// Default is a package-level tracker instance for convenience. +var Default = New() + +// New creates and returns a new in-memory tracker. +func New() *InMemoryTracker { + return &InMemoryTracker{data: make(map[string]map[string]struct{})} +} + +// Start marks a task as running under a given service. Empty arguments +// are ignored. Calling Start with the same (service, taskID) pair is idempotent. 
+func (t *InMemoryTracker) Start(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + m, ok := t.data[service] + if !ok { + m = make(map[string]struct{}) + t.data[service] = m + } + m[taskID] = struct{}{} + t.mu.Unlock() +} + +// End removes a running task under a given service. Empty arguments +// are ignored. Removing a non-existent (service, taskID) pair is a no-op. +func (t *InMemoryTracker) End(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + if m, ok := t.data[service]; ok { + delete(m, taskID) + if len(m) == 0 { + delete(t.data, service) + } + } + t.mu.Unlock() +} + +// Snapshot returns a copy of the current running tasks per service. +// The returned map and slices are independent of internal state. +func (t *InMemoryTracker) Snapshot() map[string][]string { + out := make(map[string][]string) + t.mu.RLock() + for svc, m := range t.data { + ids := make([]string, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + out[svc] = ids + } + t.mu.RUnlock() + return out +} diff --git a/pkg/task/task_test.go b/pkg/task/task_test.go new file mode 100644 index 00000000..624b900f --- /dev/null +++ b/pkg/task/task_test.go @@ -0,0 +1,161 @@ +package task + +import ( + "context" + "sync" + "testing" + "time" +) + +func TestStartEndSnapshot(t *testing.T) { + tr := New() + + // Initially empty + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot, got %#v", snap) + } + + // Start two tasks under same service + tr.Start("svc", "id1") + tr.Start("svc", "id2") + + snap := tr.Snapshot() + ids, ok := snap["svc"] + if !ok { + t.Fatalf("expected service 'svc' in snapshot") + } + if len(ids) != 2 { + t.Fatalf("expected 2 ids, got %d (%v)", len(ids), ids) + } + + // End one task + tr.End("svc", "id1") + snap = tr.Snapshot() + ids = snap["svc"] + if len(ids) != 1 { + t.Fatalf("expected 1 id, got %d (%v)", len(ids), ids) + } + if ids[0] != "id2" && ids[0] != "id1" { // order not guaranteed; check that id2 remains by set membership + // Build a small set for clarity + m := map[string]struct{}{} + for _, v := range ids { + m[v] = struct{}{} + } + if _, ok := m["id2"]; !ok { + t.Fatalf("expected id2 to remain, got %v", ids) + } + } + + // End last task + tr.End("svc", "id2") + snap = tr.Snapshot() + if _, ok := snap["svc"]; ok { + t.Fatalf("expected service removed after last task ended, got %v", snap) + } +} + +func TestInvalidInputsAndIsolation(t *testing.T) { + tr := New() + + // Invalid inputs should be ignored + tr.Start("", "id") + tr.Start("svc", "") + tr.End("", "id") + tr.End("svc", "") + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot for invalid inputs, got %#v", snap) + } + + // Snapshot must be a copy + tr.Start("svc", "id") + snap := tr.Snapshot() + // mutate snapshot map and slice + delete(snap, "svc") + snap2 := tr.Snapshot() + if _, ok := snap2["svc"]; !ok { + t.Fatalf("mutating snapshot should not affect tracker state") + } +} + +// TestConcurrentAccessNoPanic ensures that concurrent Start/End/Snapshot +// operations do not panic due to unsafe map access. +func TestConcurrentAccessNoPanic(t *testing.T) { + tr := New() + + // Run a mix of writers and readers concurrently. + var wg sync.WaitGroup + startWriters := 8 + snapReaders := 4 + loops := 1000 + + // Writers: repeatedly start/end tasks across a few services. 
+ for w := 0; w < startWriters; w++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for i := 0; i < loops; i++ { + svc := "svc" + string('A'+rune(id%3)) // svcA, svcB, svcC + tid := svc + ":t" + fmtInt(i%5) + tr.Start(svc, tid) + if i%2 == 0 { + tr.End(svc, tid) + } + } + }(w) + } + + // Readers: take snapshots concurrently. + for r := 0; r < snapReaders; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < loops; i++ { + _ = tr.Snapshot() + } + }() + } + + // If there is any concurrent map access bug, the test runner would panic. + done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + select { + case <-done: + // ok + case <-time.After(5 * time.Second): + t.Fatal("concurrent access test timed out") + } +} + +// fmtInt provides a tiny int-to-string helper to avoid importing strconv. +func fmtInt(i int) string { return string('0' + rune(i)) } + +func TestHandleIdempotentAndWatchdog(t *testing.T) { + // Swap the default tracker to isolate + orig := Default + Default = New() + defer func() { Default = orig }() + + ctx := context.Background() + + // Idempotent End + g := Start(ctx, "svc.guard", "id-1", 0) + g.End(ctx) + g.End(ctx) // no panic, no double-end crash + + // Watchdog auto-end: use a small timeout + g2 := Start(ctx, "svc.guard", "id-2", 50*time.Millisecond) + _ = g2 // ensure guard stays referenced until timeout path + // Do not call End; let the watchdog fire + time.Sleep(120 * time.Millisecond) + + // After watchdog, the task should not be listed + snap := Default.Snapshot() + if ids, ok := snap["svc.guard"]; ok { + // If still present, ensure id-2 is not in the list + for _, id := range ids { + if id == "id-2" { + t.Fatalf("expected watchdog to remove id-2 from svc.guard; snapshot: %v", ids) + } + } + } +} diff --git a/proto/supernode/status.proto b/proto/supernode/status.proto index c9edc5e7..d944d614 100644 --- a/proto/supernode/status.proto +++ b/proto/supernode/status.proto @@ -42,6 +42,13 @@ message StatusResponse { string hardware_summary = 4; // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } + // ServiceTasks contains task information for a specific service + message ServiceTasks { + string service_name = 1; + repeated string task_ids = 2; + int32 task_count = 3; + } + // Network information message Network { int32 peers_count = 1; // Number of connected peers in P2P network @@ -49,6 +56,7 @@ message StatusResponse { } Resources resources = 3; + repeated ServiceTasks running_tasks = 4; // Services with currently running tasks repeated string registered_services = 5; // All registered/available services Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index c13b94a1..e7de4992 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -46,6 +46,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } + // Deterministic per-action ordering to distribute load fairly + supernodes = orderSupernodesByDeterministicDistance(t.ActionID, supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes @@ -80,6 +82,11 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum event.KeySupernodeAddress: sn.CosmosAddress, event.KeyIteration: idx + 1, }) + // Re-check serving status just-in-time to avoid calling a node that became busy/down + if !t.isServing(ctx, sn) { + 
t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", idx+1) + continue + } if err := t.attemptRegistration(ctx, idx, sn, clientFactory, req); err != nil { // t.LogEvent(ctx, event.SDKRegistrationFailure, "registration with supernode failed", event.EventData{ diff --git a/sdk/task/download.go b/sdk/task/download.go index 2c727ae9..4adecf21 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -43,6 +43,8 @@ func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskFailed, "task failed", event.EventData{event.KeyError: err.Error()}) return err } + // Deterministic per-action ordering to distribute load fairly + supernodes = orderSupernodesByDeterministicDistance(t.ActionID, supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) // 2 – download from super-nodes @@ -88,6 +90,12 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, }) + // Re-check serving status just-in-time to avoid calling a node that became busy/down + if !t.isServing(ctx, sn) { + t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", iteration) + continue + } + if err := t.attemptDownload(ctx, sn, clientFactory, req); err != nil { // Log failure and continue to next supernode t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ @@ -116,6 +124,16 @@ func (t *CascadeDownloadTask) attemptDownload( factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeDownloadRequest, ) error { + // Recheck liveness/busyness just before attempting download to handle delays + if !t.isServing(parent, sn) { + // Emit a concise event; detailed rejection reasons are logged inside isServing + t.LogEvent(parent, event.SDKDownloadFailure, "precheck: supernode not serving/busy", event.EventData{ + event.KeySupernode: sn.GrpcEndpoint, + event.KeySupernodeAddress: sn.CosmosAddress, + event.KeyReason: "precheck_not_serving_or_busy", + }) + return fmt.Errorf("precheck: supernode not serving/busy") + } ctx, cancel := context.WithTimeout(parent, downloadTimeout) defer cancel() diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index 2ea8bcaa..2d2b7391 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -4,10 +4,13 @@ import ( "context" "encoding/base64" "fmt" + "math/big" "os" "path/filepath" + "sort" "strings" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" ) @@ -136,3 +139,47 @@ func ensureOutputPathWithFilename(outputPath, filename string) string { // Otherwise, append the filename to the path return filepath.Join(outputPath, filename) } + +func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 || seed == "" { + return sns + } + // Precompute seed hash (blake3) + seedHash, err := cascadekit.ComputeBlake3Hash([]byte(seed)) + if err != nil { + return sns + } + + type nodeDist struct { + sn lumera.Supernode + distance *big.Int + } + nd := make([]nodeDist, 0, len(sns)) + for _, sn := range sns { + id := sn.CosmosAddress + if id == "" { + id = sn.GrpcEndpoint + } + nHash, err := cascadekit.ComputeBlake3Hash([]byte(id)) + if err != nil { + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetInt64(0)}) + continue + } + // XOR distance across min 
length + l := len(seedHash) + if len(nHash) < l { + l = len(nHash) + } + xor := make([]byte, l) + for i := 0; i < l; i++ { + xor[i] = seedHash[i] ^ nHash[i] + } + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetBytes(xor)}) + } + sort.Slice(nd, func(i, j int) bool { return nd[i].distance.Cmp(nd[j].distance) < 0 }) + out := make(lumera.Supernodes, len(nd)) + for i := range nd { + out[i] = nd[i].sn + } + return out +} diff --git a/sdk/task/task.go b/sdk/task/task.go index bb402975..605819f2 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -8,7 +8,6 @@ import ( sdkmath "cosmossdk.io/math" "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" @@ -124,7 +123,7 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { PeerType: t.config.Account.PeerType, }).CreateClient(ctx, sn) if err != nil { - logtrace.Debug(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"}) + t.logger.Info(ctx, "reject supernode: client create failed", "reason", err.Error(), "endpoint", sn.GrpcEndpoint, "cosmos", sn.CosmosAddress) return false } defer client.Close(ctx) @@ -132,26 +131,41 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { // First check gRPC health resp, err := client.HealthCheck(ctx) if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { + statusStr := "nil" + if resp != nil { + statusStr = resp.Status.String() + } + t.logger.Info(ctx, "reject supernode: health not SERVING", "error", err, "status", statusStr) return false } // Then check P2P peers count via status status, err := client.GetSupernodeStatus(ctx) if err != nil { + t.logger.Info(ctx, "reject supernode: status fetch failed", "error", err) return false } if status.Network.PeersCount <= 1 { + t.logger.Info(ctx, "reject supernode: insufficient peers", "peers_count", status.Network.PeersCount) + return false + } + // Busy check: exclude supernodes that report running tasks + if rt := status.GetRunningTasks(); len(rt) > 0 { + svc := rt[0].GetServiceName() + t.logger.Info(ctx, "reject supernode: busy", "service", svc) return false } denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) if err != nil || bal == nil || bal.Balance == nil { + t.logger.Info(ctx, "reject supernode: balance fetch failed or empty", "error", err) return false } // Require at least 1 LUME = 10^6 micro (ulume) min := sdkmath.NewInt(1_000_000) if bal.Balance.Amount.LT(min) { + t.logger.Info(ctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String()) return false } diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index 548af07b..3fe4fd3f 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -3,28 +3,32 @@ package updater import ( "context" "fmt" + "io" "log" + "net/http" "os" "path/filepath" "strings" "time" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" 
"github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" + "google.golang.org/protobuf/encoding/protojson" ) // Global updater timing constants const ( // gatewayTimeout bounds the local gateway status probe - // gatewayTimeout = 15 * time.Second + gatewayTimeout = 15 * time.Second // updateCheckInterval is how often the periodic updater runs - updateCheckInterval = 5 * time.Minute + updateCheckInterval = 10 * time.Minute // forceUpdateAfter is the age threshold after a release is published // beyond which updates are applied regardless of normal gates (idle, policy) - forceUpdateAfter = 5 * time.Minute + forceUpdateAfter = 60 * time.Minute ) type AutoUpdater struct { @@ -132,37 +136,43 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { return false } -// isGatewayIdle returns (idle, isError). When isError is true, -// the gateway could not be reliably checked (network/error/invalid). -// When isError is false and idle is false, the gateway is busy. func (u *AutoUpdater) isGatewayIdle() (bool, bool) { - // client := &http.Client{Timeout: gatewayTimeout} - - // resp, err := client.Get(u.gatewayURL) - // if err != nil { - // log.Printf("Failed to check gateway status: %v", err) - // // Error contacting gateway - // return false, true - // } - // defer resp.Body.Close() - - // if resp.StatusCode != http.StatusOK { - // log.Printf("Gateway returned status %d, not safe to update", resp.StatusCode) - // return false, true - // } - - // var status pb.StatusResponse - // body, err := io.ReadAll(resp.Body) - // if err != nil { - // log.Printf("Failed to read gateway response: %v", err) - // return false, true - // } - // if err := protojson.Unmarshal(body, &status); err != nil { - // log.Printf("Failed to decode gateway response: %v", err) - // return false, true - // } - - // // TEMP: tasks are not available in the new gateway endpoint; skip busy-check + client := &http.Client{Timeout: gatewayTimeout} + + resp, err := client.Get(u.gatewayURL) + if err != nil { + log.Printf("Failed to check gateway status: %v", err) + // Error contacting gateway + return false, true + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + log.Printf("Gateway returned status %d, not safe to update", resp.StatusCode) + return false, true + } + + var status pb.StatusResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + log.Printf("Failed to read gateway response: %v", err) + return false, true + } + if err := protojson.Unmarshal(body, &status); err != nil { + log.Printf("Failed to decode gateway response: %v", err) + return false, true + } + + // Idle when there are no running tasks across all services + if len(status.GetRunningTasks()) == 0 { + return true, false + } + for _, st := range status.GetRunningTasks() { + if st.GetTaskCount() > 0 || len(st.GetTaskIds()) > 0 { + log.Printf("Gateway busy: service=%s tasks=%d", st.GetServiceName(), st.GetTaskCount()) + return false, false + } + } return true, false } diff --git a/supernode/status/service.go b/supernode/status/service.go index 553f7e2d..5c9c4cd6 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -10,6 +10,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/task" "github.com/LumeraProtocol/supernode/v2/pkg/utils" 
"github.com/LumeraProtocol/supernode/v2/supernode/config" ) @@ -98,6 +99,17 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric resp.Network.PeersCount = 0 resp.Network.PeerAddresses = []string{} + // Populate running tasks from the global in-memory tracker + if snap := task.Default.Snapshot(); len(snap) > 0 { + for svc, ids := range snap { + resp.RunningTasks = append(resp.RunningTasks, &pb.StatusResponse_ServiceTasks{ + ServiceName: svc, + TaskIds: ids, + TaskCount: int32(len(ids)), + }) + } + } + // Prepare optional P2P metrics container pm := &pb.StatusResponse_P2PMetrics{ DhtMetrics: &pb.StatusResponse_P2PMetrics_DhtMetrics{}, diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index 2a361a0f..e99477ac 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -7,10 +7,12 @@ import ( "io" "os" "path/filepath" + "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + tasks "github.com/LumeraProtocol/supernode/v2/pkg/task" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "lukechampine.com/blake3" ) @@ -27,6 +29,12 @@ func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *Actio // calculateOptimalChunkSize returns an optimal chunk size based on file size // to balance throughput and memory usage + + var ( + startedTask bool + handle *tasks.Handle + ) + func calculateOptimalChunkSize(fileSize int64) int { const ( minChunkSize = 64 * 1024 // 64 KB minimum @@ -124,6 +132,12 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er case *pb.RegisterRequest_Metadata: metadata = x.Metadata logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) + // Start live task tracking on first metadata (covers remaining stream and processing) + if !startedTask { + startedTask = true + handle = tasks.Start(ctx, "cascade.upload", metadata.ActionId, 30*time.Minute) + defer handle.End(ctx) + } } } @@ -189,6 +203,10 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS } logtrace.Debug(ctx, "download request received", fields) + // Start live task tracking for the entire download RPC (including file streaming) + dlHandle := tasks.Start(ctx, "cascade.download", req.GetActionId(), 30*time.Minute) + defer dlHandle.End(ctx) + // Prepare to capture decoded file path from task events var decodedFilePath string var tmpDir string From f6035f8e9b6cb5203c9cafe64adb3a8a457e781d Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 10 Oct 2025 17:36:59 +0500 Subject: [PATCH 18/36] Add active filter in Supernode Block Query --- pkg/lumera/modules/supernode/impl.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go index d0b633a8..064e30c0 100644 --- a/pkg/lumera/modules/supernode/impl.go +++ b/pkg/lumera/modules/supernode/impl.go @@ -30,6 +30,7 @@ func newModule(conn *grpc.ClientConn) (Module, error) { func (m *module) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) { resp, err := m.client.GetTopSuperNodesForBlock(ctx, &types.QueryGetTopSuperNodesForBlockRequest{ BlockHeight: int32(blockHeight), + State: types.SuperNodeStateActive.String(), }) if err 
!= nil { return nil, fmt.Errorf("failed to get top supernodes: %w", err) From f5d9e652300ecbec2acf9219be4d2e103aa1e483 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 10 Oct 2025 18:54:39 +0500 Subject: [PATCH 19/36] fix : Dont call unregister ALTS on connection close --- sdk/net/impl.go | 13 ++--- sdk/task/cascade.go | 11 ++++- sdk/task/download.go | 111 ------------------------------------------- 3 files changed, 16 insertions(+), 119 deletions(-) diff --git a/sdk/net/impl.go b/sdk/net/impl.go index 77ac7de9..e597ccbb 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -3,6 +3,7 @@ package net import ( "context" "fmt" + "sync" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" @@ -29,13 +30,17 @@ type supernodeClient struct { // Verify interface compliance at compile time var _ SupernodeClient = (*supernodeClient)(nil) +// ensure ALTS protocols are registered once per process +var registerALTSOnce sync.Once + // NewSupernodeClient creates a new supernode client func NewSupernodeClient(ctx context.Context, logger log.Logger, keyring keyring.Keyring, factoryConfig FactoryConfig, targetSupernode lumera.Supernode, lumeraClient lumera.Client, clientOptions *client.ClientOptions, ) (SupernodeClient, error) { - // Register ALTS protocols, just like in the test - conn.RegisterALTSRecordProtocols() + // Register ALTS protocols once (process-wide). These are global and should not + // be unregistered per-connection to avoid impacting concurrent clients. + registerALTSOnce.Do(func() { conn.RegisterALTSRecordProtocols() }) // Validate required parameters if logger == nil { @@ -154,10 +159,6 @@ func (c *supernodeClient) Close(ctx context.Context) error { if c.conn != nil { c.logger.Debug(ctx, "Closing connection to supernode") err := c.conn.Close() - - // Cleanup ALTS protocols when client is closed - conn.UnregisterALTSRecordProtocols() - return err } return nil diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index e7de4992..98ca107c 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -75,6 +75,7 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum } var lastErr error + attempted := 0 for idx, sn := range supernodes { // 1 t.LogEvent(ctx, event.SDKRegistrationAttempt, "attempting registration with supernode", event.EventData{ @@ -87,6 +88,7 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", idx+1) continue } + attempted++ if err := t.attemptRegistration(ctx, idx, sn, clientFactory, req); err != nil { // t.LogEvent(ctx, event.SDKRegistrationFailure, "registration with supernode failed", event.EventData{ @@ -105,8 +107,13 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum }) return nil // success } - - return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + if attempted == 0 { + return fmt.Errorf("no eligible supernodes to register") + } + if lastErr != nil { + return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + } + return fmt.Errorf("failed to upload to all supernodes") } func (t *CascadeTask) attemptRegistration(ctx context.Context, _ int, sn lumera.Supernode, factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeRegisterRequest) error { diff --git a/sdk/task/download.go b/sdk/task/download.go index 4adecf21..2badeabd 100644 --- 
a/sdk/task/download.go +++ b/sdk/task/download.go @@ -158,114 +158,3 @@ func (t *CascadeDownloadTask) attemptDownload( return nil } - -// downloadResult holds the result of a successful download attempt -type downloadResult struct { - SupernodeAddress string - SupernodeEndpoint string - Iteration int -} - -// attemptConcurrentDownload tries to download from multiple supernodes concurrently -// Returns the first successful result or all errors if all attempts fail -func (t *CascadeDownloadTask) attemptConcurrentDownload( - ctx context.Context, - batch lumera.Supernodes, - factory *net.ClientFactory, - req *supernodeservice.CascadeSupernodeDownloadRequest, - baseIteration int, -) (*downloadResult, []error) { - // Remove existing file if it exists to allow overwrite (do this once before concurrent attempts) - if _, err := os.Stat(req.OutputPath); err == nil { - if removeErr := os.Remove(req.OutputPath); removeErr != nil { - return nil, []error{fmt.Errorf("failed to remove existing file %s: %w", req.OutputPath, removeErr)} - } - } - - // Create a cancellable context for this batch - batchCtx, cancelBatch := context.WithCancel(ctx) - defer cancelBatch() - - // Channels for results - type attemptResult struct { - success *downloadResult - err error - idx int - } - resultCh := make(chan attemptResult, len(batch)) - - // Start concurrent download attempts - for idx, sn := range batch { - iteration := baseIteration + idx + 1 - - // Log download attempt - t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: iteration, - }) - - go func(sn lumera.Supernode, idx int, iter int) { - // Create a copy of the request for this goroutine - reqCopy := &supernodeservice.CascadeSupernodeDownloadRequest{ - ActionID: req.ActionID, - TaskID: req.TaskID, - OutputPath: req.OutputPath, - Signature: req.Signature, - } - - err := t.attemptDownload(batchCtx, sn, factory, reqCopy) - if err != nil { - resultCh <- attemptResult{ - err: err, - idx: idx, - } - return - } - - resultCh <- attemptResult{ - success: &downloadResult{ - SupernodeAddress: sn.CosmosAddress, - SupernodeEndpoint: sn.GrpcEndpoint, - Iteration: iter, - }, - idx: idx, - } - }(sn, idx, iteration) - } - - // Collect results - var errors []error - for i := range len(batch) { - select { - case result := <-resultCh: - if result.success != nil { - // Success! 
Cancel other attempts and return - cancelBatch() - // Drain remaining results to avoid goroutine leaks - go func() { - for j := i + 1; j < len(batch); j++ { - <-resultCh - } - }() - return result.success, nil - } - - // Log failure - sn := batch[result.idx] - t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: baseIteration + result.idx + 1, - event.KeyError: result.err.Error(), - }) - errors = append(errors, result.err) - - case <-ctx.Done(): - return nil, []error{ctx.Err()} - } - } - - // All attempts in this batch failed - return nil, errors -} From cd7f49816725984f26cb29153772df045ee2ebd7 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 10 Oct 2025 21:39:53 +0500 Subject: [PATCH 20/36] improve suernode selection and discovery --- pkg/task/handle.go | 78 ++--- pkg/task/task_test.go | 152 ++++----- sdk/docs/cascade-timeouts.md | 8 +- sdk/task/cascade.go | 38 ++- sdk/task/download.go | 25 +- sdk/task/task.go | 288 +++++++++++++++--- supernode/transport/grpc/cascade/handler.go | 22 +- .../transport/grpc/status/pprof_handlers.go | 3 +- 8 files changed, 418 insertions(+), 196 deletions(-) diff --git a/pkg/task/handle.go b/pkg/task/handle.go index 33359e38..05ba956c 100644 --- a/pkg/task/handle.go +++ b/pkg/task/handle.go @@ -1,65 +1,65 @@ package task import ( - "context" - "sync" - "time" + "context" + "sync" + "time" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) // Handle manages a running task with an optional watchdog. // It ensures Start and End are paired, logs start/end, and auto-ends on timeout. type Handle struct { - service string - id string - stop chan struct{} - once sync.Once + service string + id string + stop chan struct{} + once sync.Once } // Start starts tracking a task and returns a Handle that will ensure the // task is ended. A watchdog is started to auto-end the task after timeout // to avoid indefinitely stuck running tasks in status reporting. func Start(ctx context.Context, service, id string, timeout time.Duration) *Handle { - if service == "" || id == "" { - return &Handle{} - } - Default.Start(service, id) - logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) + if service == "" || id == "" { + return &Handle{} + } + Default.Start(service, id) + logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) - g := &Handle{service: service, id: id, stop: make(chan struct{})} - if timeout > 0 { - go func() { - select { - case <-time.After(timeout): - // Auto-end if not already ended - g.endWith(ctx, true) - case <-g.stop: - // normal completion - } - }() - } - return g + g := &Handle{service: service, id: id, stop: make(chan struct{})} + if timeout > 0 { + go func() { + select { + case <-time.After(timeout): + // Auto-end if not already ended + g.endWith(ctx, true) + case <-g.stop: + // normal completion + } + }() + } + return g } // End stops tracking the task. Safe to call multiple times. func (g *Handle) End(ctx context.Context) { - g.endWith(ctx, false) + g.endWith(ctx, false) } // EndWith ends the guard and logs accordingly. If expired is true, // it emits a warning and ends the task to avoid stuck status. 
func (g *Handle) endWith(ctx context.Context, expired bool) { - if g == nil || g.service == "" || g.id == "" { - return - } - g.once.Do(func() { - close(g.stop) - Default.End(g.service, g.id) - if expired { - logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) - } else { - logtrace.Info(ctx, "task: ended", logtrace.Fields{"service": g.service, "task_id": g.id}) - } - }) + if g == nil || g.service == "" || g.id == "" { + return + } + g.once.Do(func() { + close(g.stop) + Default.End(g.service, g.id) + if expired { + logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) + } else { + logtrace.Info(ctx, "task: ended", logtrace.Fields{"service": g.service, "task_id": g.id}) + } + }) } diff --git a/pkg/task/task_test.go b/pkg/task/task_test.go index 624b900f..4c5a44e1 100644 --- a/pkg/task/task_test.go +++ b/pkg/task/task_test.go @@ -1,10 +1,10 @@ package task import ( - "context" - "sync" - "testing" - "time" + "context" + "sync" + "testing" + "time" ) func TestStartEndSnapshot(t *testing.T) { @@ -80,82 +80,82 @@ func TestInvalidInputsAndIsolation(t *testing.T) { // TestConcurrentAccessNoPanic ensures that concurrent Start/End/Snapshot // operations do not panic due to unsafe map access. func TestConcurrentAccessNoPanic(t *testing.T) { - tr := New() - - // Run a mix of writers and readers concurrently. - var wg sync.WaitGroup - startWriters := 8 - snapReaders := 4 - loops := 1000 - - // Writers: repeatedly start/end tasks across a few services. - for w := 0; w < startWriters; w++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - for i := 0; i < loops; i++ { - svc := "svc" + string('A'+rune(id%3)) // svcA, svcB, svcC - tid := svc + ":t" + fmtInt(i%5) - tr.Start(svc, tid) - if i%2 == 0 { - tr.End(svc, tid) - } - } - }(w) - } - - // Readers: take snapshots concurrently. - for r := 0; r < snapReaders; r++ { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < loops; i++ { - _ = tr.Snapshot() - } - }() - } - - // If there is any concurrent map access bug, the test runner would panic. - done := make(chan struct{}) - go func() { wg.Wait(); close(done) }() - select { - case <-done: - // ok - case <-time.After(5 * time.Second): - t.Fatal("concurrent access test timed out") - } + tr := New() + + // Run a mix of writers and readers concurrently. + var wg sync.WaitGroup + startWriters := 8 + snapReaders := 4 + loops := 1000 + + // Writers: repeatedly start/end tasks across a few services. + for w := 0; w < startWriters; w++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for i := 0; i < loops; i++ { + svc := "svc" + string('A'+rune(id%3)) // svcA, svcB, svcC + tid := svc + ":t" + fmtInt(i%5) + tr.Start(svc, tid) + if i%2 == 0 { + tr.End(svc, tid) + } + } + }(w) + } + + // Readers: take snapshots concurrently. + for r := 0; r < snapReaders; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < loops; i++ { + _ = tr.Snapshot() + } + }() + } + + // If there is any concurrent map access bug, the test runner would panic. + done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + select { + case <-done: + // ok + case <-time.After(5 * time.Second): + t.Fatal("concurrent access test timed out") + } } // fmtInt provides a tiny int-to-string helper to avoid importing strconv. 
func fmtInt(i int) string { return string('0' + rune(i)) } func TestHandleIdempotentAndWatchdog(t *testing.T) { - // Swap the default tracker to isolate - orig := Default - Default = New() - defer func() { Default = orig }() - - ctx := context.Background() - - // Idempotent End - g := Start(ctx, "svc.guard", "id-1", 0) - g.End(ctx) - g.End(ctx) // no panic, no double-end crash - - // Watchdog auto-end: use a small timeout - g2 := Start(ctx, "svc.guard", "id-2", 50*time.Millisecond) - _ = g2 // ensure guard stays referenced until timeout path - // Do not call End; let the watchdog fire - time.Sleep(120 * time.Millisecond) - - // After watchdog, the task should not be listed - snap := Default.Snapshot() - if ids, ok := snap["svc.guard"]; ok { - // If still present, ensure id-2 is not in the list - for _, id := range ids { - if id == "id-2" { - t.Fatalf("expected watchdog to remove id-2 from svc.guard; snapshot: %v", ids) - } - } - } + // Swap the default tracker to isolate + orig := Default + Default = New() + defer func() { Default = orig }() + + ctx := context.Background() + + // Idempotent End + g := Start(ctx, "svc.guard", "id-1", 0) + g.End(ctx) + g.End(ctx) // no panic, no double-end crash + + // Watchdog auto-end: use a small timeout + g2 := Start(ctx, "svc.guard", "id-2", 50*time.Millisecond) + _ = g2 // ensure guard stays referenced until timeout path + // Do not call End; let the watchdog fire + time.Sleep(120 * time.Millisecond) + + // After watchdog, the task should not be listed + snap := Default.Snapshot() + if ids, ok := snap["svc.guard"]; ok { + // If still present, ensure id-2 is not in the list + for _, id := range ids { + if id == "id-2" { + t.Fatalf("expected watchdog to remove id-2 from svc.guard; snapshot: %v", ids) + } + } + } } diff --git a/sdk/docs/cascade-timeouts.md b/sdk/docs/cascade-timeouts.md index 716804bc..7568dd28 100644 --- a/sdk/docs/cascade-timeouts.md +++ b/sdk/docs/cascade-timeouts.md @@ -34,8 +34,8 @@ This document explains how timeouts and deadlines are applied across the SDK cas 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` - Validates file size; fetches healthy supernodes; registers with one. -4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` - - `context.WithTimeout(parent, 10s)` for health probe (create client + `HealthCheck`). +4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` (single-pass sanitize + load) + - `context.WithTimeout(parent, 10s)` per node: `HealthCheck` + `GetStatus` (peers, running_tasks) + balance. 5) Registration attempt: `sdk/task/cascade.go: attemptRegistration` - Client connect: uses task context (no deadline); gRPC injects a 30s default at connect if needed. @@ -136,7 +136,7 @@ This approach requires no request‑struct changes and preserves existing call s - `supernode/sdk/action/client.go` — entrypoints, no timeouts added. - `supernode/sdk/task/manager.go` — detaches from caller context; creates and runs tasks. - `supernode/sdk/task/timeouts.go` — `connectionTimeout` for health checks. - - `supernode/sdk/task/task.go` — discovery + health checks using `connectionTimeout`. + - `supernode/sdk/task/task.go` — discovery with single-pass probe (`fetchSupernodesWithLoads`) using `connectionTimeout`. - `supernode/sdk/adapters/supernodeservice/timeouts.go` — upload/processing timeout constants. - `supernode/sdk/adapters/supernodeservice/adapter.go` — upload and progress stream handling (phase timers + events). - `supernode/sdk/net/factory.go` — client options tuned for streaming. 
@@ -170,7 +170,7 @@ This document describes how the SDK applies timeouts and deadlines during cascad 1) `sdk/action/client.go: ClientImpl.StartCascade(ctx, ...)` — forwards `ctx` to the Task Manager. 2) `sdk/task/manager.go: ManagerImpl.CreateCascadeTask(...)` — detaches from caller (`context.WithCancel(context.Background())`). 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` — validates file size, discovers healthy supernodes, attempts registration. -4) `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` — health probe with `connectionTimeout = 10s` per node. +4) `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` — single-pass probe with `connectionTimeout = 10s` per node (health, status, balance) and load snapshot. 5) `sdk/task/cascade.go: attemptRegistration` — creates client and calls `RegisterCascade` with task context. 6) `sdk/adapters/supernodeservice/adapter.go: CascadeSupernodeRegister` — applies phase timers: - Upload phase: send chunks and metadata; cancel if `cascadeUploadTimeout` elapses. diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index 98ca107c..68184ea7 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -37,8 +37,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - // 1 - Fetch the supernodes - supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) + // 1 - Fetch the supernodes (single-pass probe: sanitize + load snapshot) + supernodes, loads, err := t.fetchSupernodesWithLoads(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "Supernodes unavailable", event.EventData{event.KeyError: err.Error()}) @@ -46,8 +46,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - // Deterministic per-action ordering to distribute load fairly - supernodes = orderSupernodesByDeterministicDistance(t.ActionID, supernodes) + // Rank by current load snapshot (fewest first), tie-break deterministically + supernodes = t.orderByLoadSnapshotThenDeterministic(supernodes, loads) t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes @@ -76,34 +76,46 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum var lastErr error attempted := 0 - for idx, sn := range supernodes { - // 1 + // Work on a copy and re-rank between attempts to avoid stale ordering + remaining := append(lumera.Supernodes(nil), supernodes...) 
+ for len(remaining) > 0 { + // Refresh load-aware ordering for remaining candidates + remaining = t.orderByLoadThenDeterministic(ctx, remaining) + sn := remaining[0] + iteration := attempted + 1 + t.LogEvent(ctx, event.SDKRegistrationAttempt, "attempting registration with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) - // Re-check serving status just-in-time to avoid calling a node that became busy/down + + // Re-check serving status just-in-time to avoid calling a node that became down/underpeered if !t.isServing(ctx, sn) { - t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", idx+1) + t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", iteration) + // Drop this node and retry with the rest + remaining = remaining[1:] continue } + attempted++ - if err := t.attemptRegistration(ctx, idx, sn, clientFactory, req); err != nil { - // + if err := t.attemptRegistration(ctx, iteration-1, sn, clientFactory, req); err != nil { t.LogEvent(ctx, event.SDKRegistrationFailure, "registration with supernode failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, event.KeyError: err.Error(), }) lastErr = err + // Drop this node and retry with the rest (re-ranked next loop) + remaining = remaining[1:] continue } + t.LogEvent(ctx, event.SDKRegistrationSuccessful, "successfully registered with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) return nil // success } diff --git a/sdk/task/download.go b/sdk/task/download.go index 2badeabd..eb9ad8eb 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -36,15 +36,15 @@ func NewCascadeDownloadTask(base BaseTask, actionId string, outputPath string, s func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskStarted, "Running cascade download task", nil) - // 1 – fetch super-nodes - supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) + // 1 – fetch super-nodes (single-pass probe: sanitize + load snapshot) + supernodes, loads, err := t.fetchSupernodesWithLoads(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "super-nodes unavailable", event.EventData{event.KeyError: err.Error()}) t.LogEvent(ctx, event.SDKTaskFailed, "task failed", event.EventData{event.KeyError: err.Error()}) return err } - // Deterministic per-action ordering to distribute load fairly - supernodes = orderSupernodesByDeterministicDistance(t.ActionID, supernodes) + // Rank by current load snapshot (fewest first), tie-break deterministically + supernodes = t.orderByLoadSnapshotThenDeterministic(supernodes, loads) t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) // 2 – download from super-nodes @@ -78,10 +78,14 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern } } - // Try supernodes sequentially, one by one (now sorted) + // Try supernodes sequentially with re-ranking between attempts var lastErr error - for idx, sn := range supernodes { - iteration := idx + 1 + remaining := append(lumera.Supernodes(nil), 
supernodes...) + attempted := 0 + for len(remaining) > 0 { + remaining = t.orderByLoadThenDeterministic(ctx, remaining) + sn := remaining[0] + iteration := attempted + 1 // Log download attempt t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ @@ -90,14 +94,16 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, }) - // Re-check serving status just-in-time to avoid calling a node that became busy/down + // Re-check serving status just-in-time to avoid calling a node that became down/underpeered if !t.isServing(ctx, sn) { t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", iteration) + remaining = remaining[1:] continue } + attempted++ if err := t.attemptDownload(ctx, sn, clientFactory, req); err != nil { - // Log failure and continue to next supernode + // Log failure and continue with the rest t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, @@ -105,6 +111,7 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyError: err.Error(), }) lastErr = err + remaining = remaining[1:] continue } diff --git a/sdk/task/task.go b/sdk/task/task.go index 605819f2..088de086 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sort" "sync" sdkmath "cosmossdk.io/math" @@ -76,42 +77,7 @@ func (t *BaseTask) LogEvent(ctx context.Context, evt event.EventType, msg string t.emitEvent(ctx, evt, additionalInfo) } -func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Supernodes, error) { - sns, err := t.client.GetSupernodes(ctx, height) - if err != nil { - return nil, fmt.Errorf("fetch supernodes: %w", err) - } - - if len(sns) == 0 { - return nil, errors.New("no supernodes found") - } - - // Keep only SERVING nodes (done in parallel – keeps latency flat) - healthy := make(lumera.Supernodes, 0, len(sns)) - eg, ctx := errgroup.WithContext(ctx) - mu := sync.Mutex{} - - for _, sn := range sns { - sn := sn - eg.Go(func() error { - if t.isServing(ctx, sn) { - mu.Lock() - healthy = append(healthy, sn) - mu.Unlock() - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, fmt.Errorf("health-check goroutines: %w", err) - } - - if len(healthy) == 0 { - return nil, errors.New("no healthy supernodes found") - } - - return healthy, nil -} +// (removed) fetchSupernodes: replaced by fetchSupernodesWithLoads single-pass probe // isServing pings the super-node once with a short timeout. 
func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { @@ -149,12 +115,6 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { t.logger.Info(ctx, "reject supernode: insufficient peers", "peers_count", status.Network.PeersCount) return false } - // Busy check: exclude supernodes that report running tasks - if rt := status.GetRunningTasks(); len(rt) > 0 { - svc := rt[0].GetServiceName() - t.logger.Info(ctx, "reject supernode: busy", "service", svc) - return false - } denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) @@ -171,3 +131,247 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { return true } + +// fetchSupernodesWithLoads performs a single-pass probe that both sanitizes candidates +// and captures their current running task load for initial ranking. +// Returns the healthy supernodes and a map of node-key -> load. +func (t *BaseTask) fetchSupernodesWithLoads(ctx context.Context, height int64) (lumera.Supernodes, map[string]int, error) { + sns, err := t.client.GetSupernodes(ctx, height) + if err != nil { + return nil, nil, fmt.Errorf("fetch supernodes: %w", err) + } + if len(sns) == 0 { + return nil, nil, errors.New("no supernodes found") + } + + healthy := make(lumera.Supernodes, 0, len(sns)) + loads := make(map[string]int, len(sns)) + mu := sync.Mutex{} + + eg, ctx := errgroup.WithContext(ctx) + for _, sn := range sns { + sn := sn + eg.Go(func() error { + cctx, cancel := context.WithTimeout(ctx, connectionTimeout) + defer cancel() + + client, err := net.NewClientFactory(cctx, t.logger, t.keyring, t.client, net.FactoryConfig{ + LocalCosmosAddress: t.config.Account.LocalCosmosAddress, + PeerType: t.config.Account.PeerType, + }).CreateClient(cctx, sn) + if err != nil { + t.logger.Info(cctx, "reject supernode: client create failed", "reason", err.Error(), "endpoint", sn.GrpcEndpoint, "cosmos", sn.CosmosAddress) + return nil + } + defer client.Close(cctx) + + // Health + resp, err := client.HealthCheck(cctx) + if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { + statusStr := "nil" + if resp != nil { + statusStr = resp.Status.String() + } + t.logger.Info(cctx, "reject supernode: health not SERVING", "error", err, "status", statusStr) + return nil + } + + // Status (for peers + load) + status, err := client.GetSupernodeStatus(cctx) + if err != nil { + t.logger.Info(cctx, "reject supernode: status fetch failed", "error", err) + return nil + } + if status.Network.PeersCount <= 1 { + t.logger.Info(cctx, "reject supernode: insufficient peers", "peers_count", status.Network.PeersCount) + return nil + } + + // Compute load from running tasks (sum of task_count across services) + total := 0 + for _, st := range status.GetRunningTasks() { + if st == nil { + continue + } + if c := int(st.GetTaskCount()); c > 0 { + total += c + } else if ids := st.GetTaskIds(); len(ids) > 0 { + total += len(ids) + } + } + + // Balance + denom := txmod.DefaultFeeDenom + bal, err := t.client.GetBalance(cctx, sn.CosmosAddress, denom) + if err != nil || bal == nil || bal.Balance == nil { + t.logger.Info(cctx, "reject supernode: balance fetch failed or empty", "error", err) + return nil + } + min := sdkmath.NewInt(1_000_000) + if bal.Balance.Amount.LT(min) { + t.logger.Info(cctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String()) + return nil + } + + // Accept + mu.Lock() + 
healthy = append(healthy, sn) + key := sn.CosmosAddress + if key == "" { + key = sn.GrpcEndpoint + } + loads[key] = total + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, nil, fmt.Errorf("health-check goroutines: %w", err) + } + if len(healthy) == 0 { + return nil, nil, errors.New("no healthy supernodes found") + } + return healthy, loads, nil +} + +// orderByLoadSnapshotThenDeterministic sorts using a provided load snapshot; nodes missing +// in the snapshot are considered unknown-load and placed after known-load nodes. +func (t *BaseTask) orderByLoadSnapshotThenDeterministic(sns lumera.Supernodes, loads map[string]int) lumera.Supernodes { + if len(sns) <= 1 { + return sns + } + + det := orderSupernodesByDeterministicDistance(t.ActionID, append(lumera.Supernodes(nil), sns...)) + idx := make(map[string]int, len(det)) + for i, sn := range det { + key := sn.CosmosAddress + if key == "" { + key = sn.GrpcEndpoint + } + idx[key] = i + } + + type scored struct { + sn lumera.Supernode + load int + loadKnown bool + tieIdx int + } + arr := make([]scored, 0, len(sns)) + for _, sn := range sns { + key := sn.CosmosAddress + if key == "" { + key = sn.GrpcEndpoint + } + l, ok := loads[key] + arr = append(arr, scored{sn: sn, load: l, loadKnown: ok, tieIdx: idx[key]}) + } + + sort.Slice(arr, func(i, j int) bool { + ai, aj := arr[i], arr[j] + if ai.loadKnown != aj.loadKnown { + return ai.loadKnown + } + if ai.loadKnown && aj.loadKnown && ai.load != aj.load { + return ai.load < aj.load + } + return ai.tieIdx < aj.tieIdx + }) + + out := make(lumera.Supernodes, len(arr)) + for i := range arr { + out[i] = arr[i].sn + } + return out +} + +// orderByLoadThenDeterministic ranks supernodes by their current running task count (ascending). +// Ties are broken deterministically using orderSupernodesByDeterministicDistance with ActionID as seed. +func (t *BaseTask) orderByLoadThenDeterministic(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) <= 1 { + return sns + } + + // Precompute deterministic tie-break order index per node + det := orderSupernodesByDeterministicDistance(t.ActionID, append(lumera.Supernodes(nil), sns...)) + idx := make(map[string]int, len(det)) + for i, sn := range det { + key := sn.CosmosAddress + if key == "" { + key = sn.GrpcEndpoint + } + idx[key] = i + } + + type scored struct { + sn lumera.Supernode + load int + loadKnown bool + tieIdx int + } + + out := make([]scored, len(sns)) + + // Collect loads in parallel under the same short connection timeout. 
+ eg, ctx := errgroup.WithContext(parent) + for i, sn := range sns { + i, sn := i, sn + out[i] = scored{sn: sn, load: 0, loadKnown: false, tieIdx: func() int { + k := sn.CosmosAddress + if k == "" { + k = sn.GrpcEndpoint + } + return idx[k] + }()} + eg.Go(func() error { + cctx, cancel := context.WithTimeout(ctx, connectionTimeout) + defer cancel() + client, err := net.NewClientFactory(cctx, t.logger, t.keyring, t.client, net.FactoryConfig{ + LocalCosmosAddress: t.config.Account.LocalCosmosAddress, + PeerType: t.config.Account.PeerType, + }).CreateClient(cctx, sn) + if err != nil { + return nil // unknown load; keep candidate + } + defer client.Close(cctx) + status, err := client.GetSupernodeStatus(cctx) + if err != nil || status == nil { + return nil + } + // Sum total running tasks across services + total := 0 + for _, st := range status.GetRunningTasks() { + if st == nil { + continue + } + if c := int(st.GetTaskCount()); c > 0 { + total += c + } else if ids := st.GetTaskIds(); len(ids) > 0 { + total += len(ids) + } + } + out[i].load = total + out[i].loadKnown = true + return nil + }) + } + _ = eg.Wait() // best-effort; unknown loads are placed after known ones below + + sort.Slice(out, func(i, j int) bool { + ai, aj := out[i], out[j] + if ai.loadKnown != aj.loadKnown { + return ai.loadKnown // known loads first + } + if ai.loadKnown && aj.loadKnown && ai.load != aj.load { + return ai.load < aj.load + } + // Tie-break deterministically + return ai.tieIdx < aj.tieIdx + }) + + res := make(lumera.Supernodes, len(out)) + for i := range out { + res[i] = out[i].sn + } + return res +} diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index e99477ac..2ba126d9 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -30,10 +30,10 @@ func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *Actio // calculateOptimalChunkSize returns an optimal chunk size based on file size // to balance throughput and memory usage - var ( - startedTask bool - handle *tasks.Handle - ) +var ( + startedTask bool + handle *tasks.Handle +) func calculateOptimalChunkSize(fileSize int64) int { const ( @@ -133,11 +133,11 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er metadata = x.Metadata logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) // Start live task tracking on first metadata (covers remaining stream and processing) - if !startedTask { - startedTask = true - handle = tasks.Start(ctx, "cascade.upload", metadata.ActionId, 30*time.Minute) - defer handle.End(ctx) - } + if !startedTask { + startedTask = true + handle = tasks.Start(ctx, "cascade.upload", metadata.ActionId, 30*time.Minute) + defer handle.End(ctx) + } } } @@ -204,8 +204,8 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS logtrace.Debug(ctx, "download request received", fields) // Start live task tracking for the entire download RPC (including file streaming) - dlHandle := tasks.Start(ctx, "cascade.download", req.GetActionId(), 30*time.Minute) - defer dlHandle.End(ctx) + dlHandle := tasks.Start(ctx, "cascade.download", req.GetActionId(), 30*time.Minute) + defer dlHandle.End(ctx) // Prepare to capture decoded file path from task events var decodedFilePath string diff --git a/supernode/transport/grpc/status/pprof_handlers.go b/supernode/transport/grpc/status/pprof_handlers.go index 3c8defed..00be8b99 
100644 --- a/supernode/transport/grpc/status/pprof_handlers.go +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -24,7 +24,6 @@ func (s *SupernodeServer) isPprofEnabled() bool { return os.Getenv("ENABLE_PPROF") == "true" } - // Raw pprof handlers - these proxy to the actual pprof HTTP endpoints // pprofProxy makes an internal HTTP request to the actual pprof endpoint @@ -250,4 +249,4 @@ func (s *SupernodeServer) GetRawPprofTrace(ctx context.Context, req *pb.RawPprof } return &pb.RawPprofResponse{Data: data}, nil -} \ No newline at end of file +} From 82d627136d0c1cb27f148ab5244e145534934e36 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 10 Oct 2025 21:57:53 +0500 Subject: [PATCH 21/36] Improve task tracking --- pkg/task/handle.go | 15 ++++++------ pkg/task/task.go | 3 --- pkg/task/task_test.go | 18 ++++++-------- supernode/cmd/start.go | 10 ++++---- supernode/status/service.go | 24 +++++++++++-------- supernode/transport/grpc/cascade/handler.go | 26 ++++++++++++++++----- 6 files changed, 55 insertions(+), 41 deletions(-) diff --git a/pkg/task/handle.go b/pkg/task/handle.go index 05ba956c..74f6e406 100644 --- a/pkg/task/handle.go +++ b/pkg/task/handle.go @@ -11,6 +11,7 @@ import ( // Handle manages a running task with an optional watchdog. // It ensures Start and End are paired, logs start/end, and auto-ends on timeout. type Handle struct { + tr Tracker service string id string stop chan struct{} @@ -20,22 +21,20 @@ type Handle struct { // Start starts tracking a task and returns a Handle that will ensure the // task is ended. A watchdog is started to auto-end the task after timeout // to avoid indefinitely stuck running tasks in status reporting. -func Start(ctx context.Context, service, id string, timeout time.Duration) *Handle { - if service == "" || id == "" { +func StartWith(tr Tracker, ctx context.Context, service, id string, timeout time.Duration) *Handle { + if tr == nil || service == "" || id == "" { return &Handle{} } - Default.Start(service, id) + tr.Start(service, id) logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) - g := &Handle{service: service, id: id, stop: make(chan struct{})} + g := &Handle{tr: tr, service: service, id: id, stop: make(chan struct{})} if timeout > 0 { go func() { select { case <-time.After(timeout): - // Auto-end if not already ended g.endWith(ctx, true) case <-g.stop: - // normal completion } }() } @@ -55,7 +54,9 @@ func (g *Handle) endWith(ctx context.Context, expired bool) { } g.once.Do(func() { close(g.stop) - Default.End(g.service, g.id) + if g.tr != nil { + g.tr.End(g.service, g.id) + } if expired { logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) } else { diff --git a/pkg/task/task.go b/pkg/task/task.go index 6bf50e78..8d0c0052 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -25,9 +25,6 @@ type InMemoryTracker struct { data map[string]map[string]struct{} } -// Default is a package-level tracker instance for convenience. -var Default = New() - // New creates and returns a new in-memory tracker. 
func New() *InMemoryTracker { return &InMemoryTracker{data: make(map[string]map[string]struct{})} diff --git a/pkg/task/task_test.go b/pkg/task/task_test.go index 4c5a44e1..1550bc37 100644 --- a/pkg/task/task_test.go +++ b/pkg/task/task_test.go @@ -130,31 +130,27 @@ func TestConcurrentAccessNoPanic(t *testing.T) { func fmtInt(i int) string { return string('0' + rune(i)) } func TestHandleIdempotentAndWatchdog(t *testing.T) { - // Swap the default tracker to isolate - orig := Default - Default = New() - defer func() { Default = orig }() - + tr := New() ctx := context.Background() // Idempotent End - g := Start(ctx, "svc.guard", "id-1", 0) + g := StartWith(tr, ctx, "svc.handle", "id-1", 0) g.End(ctx) g.End(ctx) // no panic, no double-end crash // Watchdog auto-end: use a small timeout - g2 := Start(ctx, "svc.guard", "id-2", 50*time.Millisecond) - _ = g2 // ensure guard stays referenced until timeout path + g2 := StartWith(tr, ctx, "svc.handle", "id-2", 50*time.Millisecond) + _ = g2 // ensure handle stays referenced until timeout path // Do not call End; let the watchdog fire time.Sleep(120 * time.Millisecond) // After watchdog, the task should not be listed - snap := Default.Snapshot() - if ids, ok := snap["svc.guard"]; ok { + snap := tr.Snapshot() + if ids, ok := snap["svc.handle"]; ok { // If still present, ensure id-2 is not in the list for _, id := range ids { if id == "id-2" { - t.Fatalf("expected watchdog to remove id-2 from svc.guard; snapshot: %v", ids) + t.Fatalf("expected watchdog to remove id-2 from svc.handle; snapshot: %v", ids) } } } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 1b0b1de7..107eba4f 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -18,6 +18,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera" grpcserver "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/server" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/task" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "github.com/LumeraProtocol/supernode/v2/supernode/config" statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" @@ -115,14 +116,15 @@ The supernode will connect to the Lumera network and begin participating in the rqStore, ) - // Create cascade action server - cascadeActionServer := cascadeRPC.NewCascadeActionServer(cService) + // Create a task tracker and cascade action server with DI + tr := task.New() + cascadeActionServer := cascadeRPC.NewCascadeActionServer(cService, tr, 0, 0) // Set the version in the status service package statusService.Version = Version - // Create supernode status service - statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig) + // Create supernode status service with injected tracker + statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig, tr) // Create supernode server supernodeServer := server.NewSupernodeServer(statusSvc) diff --git a/supernode/status/service.go b/supernode/status/service.go index 5c9c4cd6..f89d0115 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -26,11 +26,12 @@ type SupernodeStatusService struct { p2pService p2p.Client lumeraClient lumera.Client config *config.Config + tracker task.Tracker } // NewSupernodeStatusService creates a new supernode status service instance -func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { - return 
&SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg} +func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config, tracker task.Tracker) *SupernodeStatusService { + return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg, tracker: tracker} } // GetChainID returns the chain ID from the configuration @@ -99,14 +100,17 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric resp.Network.PeersCount = 0 resp.Network.PeerAddresses = []string{} - // Populate running tasks from the global in-memory tracker - if snap := task.Default.Snapshot(); len(snap) > 0 { - for svc, ids := range snap { - resp.RunningTasks = append(resp.RunningTasks, &pb.StatusResponse_ServiceTasks{ - ServiceName: svc, - TaskIds: ids, - TaskCount: int32(len(ids)), - }) + // Populate running tasks from injected tracker + if s.tracker != nil { + snap := s.tracker.Snapshot() + if len(snap) > 0 { + for svc, ids := range snap { + resp.RunningTasks = append(resp.RunningTasks, &pb.StatusResponse_ServiceTasks{ + ServiceName: svc, + TaskIds: ids, + TaskCount: int32(len(ids)), + }) + } } } diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index 2ba126d9..10fb4586 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -19,12 +19,26 @@ import ( type ActionServer struct { pb.UnimplementedCascadeServiceServer - factory cascadeService.CascadeServiceFactory + factory cascadeService.CascadeServiceFactory + tracker tasks.Tracker + uploadTimeout time.Duration + downloadTimeout time.Duration } -// NewCascadeActionServer creates a new CascadeActionServer with injected service -func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { - return &ActionServer{factory: factory} +const ( + serviceCascadeUpload = "cascade.upload" + serviceCascadeDownload = "cascade.download" +) + +// NewCascadeActionServer creates a new CascadeActionServer with injected service and tracker +func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory, tracker tasks.Tracker, uploadTO, downloadTO time.Duration) *ActionServer { + if uploadTO <= 0 { + uploadTO = 30 * time.Minute + } + if downloadTO <= 0 { + downloadTO = 30 * time.Minute + } + return &ActionServer{factory: factory, tracker: tracker, uploadTimeout: uploadTO, downloadTimeout: downloadTO} } // calculateOptimalChunkSize returns an optimal chunk size based on file size @@ -135,7 +149,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er // Start live task tracking on first metadata (covers remaining stream and processing) if !startedTask { startedTask = true - handle = tasks.Start(ctx, "cascade.upload", metadata.ActionId, 30*time.Minute) + handle = tasks.StartWith(server.tracker, ctx, serviceCascadeUpload, metadata.ActionId, server.uploadTimeout) defer handle.End(ctx) } } @@ -204,7 +218,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS logtrace.Debug(ctx, "download request received", fields) // Start live task tracking for the entire download RPC (including file streaming) - dlHandle := tasks.Start(ctx, "cascade.download", req.GetActionId(), 30*time.Minute) + dlHandle := 
tasks.StartWith(server.tracker, ctx, serviceCascadeDownload, req.GetActionId(), server.downloadTimeout) defer dlHandle.End(ctx) // Prepare to capture decoded file path from task events From 2e5e879a95e57512e80760075e5e8cfc5e6ab5cf Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 13 Oct 2025 12:46:17 +0500 Subject: [PATCH 22/36] Move adapters to outside cascade --- supernode/{cascade => }/adaptors/lumera.go | 0 supernode/{cascade => }/adaptors/p2p.go | 0 supernode/{cascade => }/adaptors/rq.go | 0 supernode/cascade/download.go | 2 +- supernode/cascade/helper.go | 2 +- supernode/cascade/service.go | 2 +- 6 files changed, 3 insertions(+), 3 deletions(-) rename supernode/{cascade => }/adaptors/lumera.go (100%) rename supernode/{cascade => }/adaptors/p2p.go (100%) rename supernode/{cascade => }/adaptors/rq.go (100%) diff --git a/supernode/cascade/adaptors/lumera.go b/supernode/adaptors/lumera.go similarity index 100% rename from supernode/cascade/adaptors/lumera.go rename to supernode/adaptors/lumera.go diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/adaptors/p2p.go similarity index 100% rename from supernode/cascade/adaptors/p2p.go rename to supernode/adaptors/p2p.go diff --git a/supernode/cascade/adaptors/rq.go b/supernode/adaptors/rq.go similarity index 100% rename from supernode/cascade/adaptors/rq.go rename to supernode/adaptors/rq.go diff --git a/supernode/cascade/download.go b/supernode/cascade/download.go index ec956e01..986fb55d 100644 --- a/supernode/cascade/download.go +++ b/supernode/cascade/download.go @@ -15,7 +15,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/crypto" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" ) const targetRequiredPercent = 17 diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index 1c3b2844..f8375927 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -13,7 +13,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" sdk "github.com/cosmos/cosmos-sdk/types" "google.golang.org/grpc/codes" diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go index 21197c86..374a9389 100644 --- a/supernode/cascade/service.go +++ b/supernode/cascade/service.go @@ -7,7 +7,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" ) type CascadeService struct { From 06045083d25da7368970facc2e507e8fc0777c6d Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 13 Oct 2025 18:09:35 +0500 Subject: [PATCH 23/36] Add version gating in dht --- p2p/kademlia/dht.go | 24 +++++++++++++++++++++--- p2p/kademlia/network.go | 14 ++++++++++++++ p2p/kademlia/node.go | 3 +++ p2p/kademlia/node_activity.go | 17 ++++++++++++----- p2p/kademlia/replication.go | 2 +- p2p/kademlia/version_gate.go | 26 ++++++++++++++++++++++++++ supernode/cmd/start.go | 6 ++++++ 7 files changed, 83 insertions(+), 9 deletions(-) create mode 100644 p2p/kademlia/version_gate.go 
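[Editor's note on the tracker changes in PATCH 21 above] With the package-level `Default` tracker removed, callers now construct one tracker and inject it everywhere that starts or reports tasks (in `supernode/cmd/start.go` it is handed to both `NewCascadeActionServer` and `NewSupernodeStatusService`). The sketch below shows the intended usage of that API in isolation; the service name, action ID, and timeout are illustrative values, not fixed constants.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/LumeraProtocol/supernode/v2/pkg/task"
)

func main() {
	ctx := context.Background()
	tr := task.New() // one tracker shared by gRPC handlers and the status service

	// A handler starts tracking when work begins; the watchdog auto-ends the
	// entry if End is never called (e.g. a stuck stream).
	h := task.StartWith(tr, ctx, "cascade.upload", "action-123", 30*time.Minute)
	defer h.End(ctx) // idempotent: safe even if the watchdog already fired

	// The status service reads the same tracker when building its response.
	for svc, ids := range tr.Snapshot() {
		fmt.Printf("%s: %d running (%v)\n", svc, len(ids), ids)
	}
}
```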
diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 62aa2768..7048a727 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -498,9 +498,10 @@ func (s *DHT) newMessage(messageType int, receiver *Node, data interface{}) *Mes } sender := &Node{ - IP: hostIP, - ID: s.ht.self.ID, - Port: s.ht.self.Port, + IP: hostIP, + ID: s.ht.self.ID, + Port: s.ht.self.Port, + Version: requiredVersion(), } return &Message{ Sender: sender, @@ -1398,6 +1399,23 @@ func (s *DHT) sendStoreData(ctx context.Context, n *Node, request *StoreDataRequ // add a node into the appropriate k bucket, return the removed node if it's full func (s *DHT) addNode(ctx context.Context, node *Node) *Node { + // Strict version gating: must match env and be non-empty. + peerVer := "" + if node != nil { + peerVer = node.Version + } + if required, mismatch := versionMismatch(peerVer); mismatch { + fields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "required": required, + "peer_version": strings.TrimSpace(peerVer), + } + if node != nil { + fields["peer"] = node.String() + } + logtrace.Debug(ctx, "Rejecting node due to version mismatch", fields) + return nil + } // Allow localhost for integration testing isIntegrationTest := os.Getenv("INTEGRATION_TEST") == "true" if node.IP == "" || node.IP == "0.0.0.0" || (!isIntegrationTest && node.IP == "127.0.0.1") { diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index bae7fd87..cb1ff928 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -415,6 +415,20 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { } } + // Strict version gating: reject immediately on mismatch or missing + var senderVer string + if request != nil && request.Sender != nil { + senderVer = request.Sender.Version + } + if required, mismatch := versionMismatch(senderVer); mismatch { + logtrace.Debug(ctx, "Rejecting connection due to version mismatch", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "required": required, + "peer_version": strings.TrimSpace(senderVer), + }) + return + } + reqID := uuid.New().String() mt := request.MessageType diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index b7a4baeb..ed37d4be 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -23,6 +23,9 @@ type Node struct { // port of the node Port uint16 `json:"port,omitempty"` + // Version of the supernode binary (used for strict DHT gating) + Version string `json:"version,omitempty"` + HashedID []byte } diff --git a/p2p/kademlia/node_activity.go b/p2p/kademlia/node_activity.go index 88e09f7a..f2f77e69 100644 --- a/p2p/kademlia/node_activity.go +++ b/p2p/kademlia/node_activity.go @@ -42,7 +42,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { var wg sync.WaitGroup for _, info := range repInfo { - info := info // capture + wg.Add(1) sem <- struct{}{} // acquire go func() { @@ -51,8 +51,8 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { node := s.makeNode([]byte(info.ID), info.IP, info.Port) - // Short per-ping timeout (fail fast) - if err := s.pingNode(ctx, node, 3*time.Second); err != nil { + // Per-ping timeout + if err := s.pingNode(ctx, node, 5*time.Second); err != nil { s.handlePingFailure(ctx, info.Active, node, err) return } @@ -76,8 +76,15 @@ func (s *DHT) pingNode(ctx context.Context, n *Node, timeout time.Duration) erro pctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() req := s.newMessage(Ping, n, nil) - _, err := s.network.Call(pctx, req, false) - return err + resp, err := s.network.Call(pctx, req, false) + if 
err != nil { + return err + } + // Capture remote version from response sender for later gating + if resp != nil && resp.Sender != nil { + n.Version = resp.Sender.Version + } + return nil } func (s *DHT) handlePingFailure(ctx context.Context, wasActive bool, n *Node, err error) { diff --git a/p2p/kademlia/replication.go b/p2p/kademlia/replication.go index 4a36c422..247f43b8 100644 --- a/p2p/kademlia/replication.go +++ b/p2p/kademlia/replication.go @@ -23,7 +23,7 @@ var ( nodeShowUpDeadline = time.Minute * 35 // check for active & inactive nodes after this interval - checkNodeActivityInterval = time.Minute * 2 + checkNodeActivityInterval = time.Minute * 5 defaultFetchAndStoreInterval = time.Minute * 10 diff --git a/p2p/kademlia/version_gate.go b/p2p/kademlia/version_gate.go new file mode 100644 index 00000000..e9b70239 --- /dev/null +++ b/p2p/kademlia/version_gate.go @@ -0,0 +1,26 @@ +package kademlia + +import "strings" + +var requiredVer string + +// SetRequiredVersion sets the version that peers must match to be accepted. +func SetRequiredVersion(v string) { + requiredVer = strings.TrimSpace(v) +} + +// requiredVersion returns the configured required version (build-time injected by caller). +func requiredVersion() string { + return requiredVer +} + +// versionMismatch determines if the given peer version is unacceptable. +// Policy: required and peer must both be non-empty and exactly equal. +func versionMismatch(peerVersion string) (required string, mismatch bool) { + required = requiredVersion() + peer := strings.TrimSpace(peerVersion) + if required == "" || peer == "" || peer != required { + return required, true + } + return required, false +} diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 107eba4f..b96bc7d5 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -33,6 +33,9 @@ import ( pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" pbsupernode "github.com/LumeraProtocol/supernode/v2/gen/supernode" + + // Configure DHT version gating from build-injected Version + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" ) // startCmd represents the start command @@ -45,6 +48,9 @@ The supernode will connect to the Lumera network and begin participating in the // Initialize logging logtrace.Setup("supernode") + // Set strict DHT required version from build-time injected variable + kademlia.SetRequiredVersion(Version) + // Create context with correlation ID for tracing ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") // Make the context cancelable for graceful shutdown From fda3eae26de25273af3a8aef269cdd26c0e92110 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 13 Oct 2025 20:08:44 +0500 Subject: [PATCH 24/36] Add snapshot capture timeouts in status api --- supernode/status/service.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/supernode/status/service.go b/supernode/status/service.go index f89d0115..ca58e4ce 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -1,9 +1,9 @@ package status import ( - "context" - "fmt" - "time" + "context" + "fmt" + "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/p2p" @@ -18,6 +18,8 @@ import ( // Version is the supernode version, set by the main application var Version = "dev" +const statusSubsystemTimeout = 8 * time.Second + // SupernodeStatusService provides centralized status information type SupernodeStatusService 
struct { metrics *MetricsCollector @@ -124,8 +126,11 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, } - if includeP2PMetrics && s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) + if includeP2PMetrics && s.p2pService != nil { + // Bound P2P metrics collection so status can't hang if P2P is slow + p2pCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + p2pStats, err := s.p2pService.Stats(p2pCtx) if err != nil { logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) } else { @@ -207,10 +212,15 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric resp.P2PMetrics = pm } - if s.config != nil && s.lumeraClient != nil { - if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { - resp.IpAddress = supernodeInfo.LatestAddress - } - } + if s.config != nil && s.lumeraClient != nil { + // Bound chain query for latest address to avoid slow network hangs + chainCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(chainCtx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { + resp.IpAddress = supernodeInfo.LatestAddress + } else if err != nil { + logtrace.Error(ctx, "failed to resolve latest supernode address", logtrace.Fields{logtrace.FieldError: err.Error()}) + } + } return resp, nil } From 33b61849b697e4334982526006c88dbe23032c93 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 13 Oct 2025 22:12:16 +0500 Subject: [PATCH 25/36] Rq-8-GB-Logs --- pkg/codec/decode.go | 19 +++----- pkg/codec/raptorq.go | 8 +-- pkg/lumera/modules/auth/impl.go | 7 ++- supernode/cascade/helper.go | 54 +++++++++++++++++++-- supernode/status/service.go | 36 +++++++------- supernode/transport/grpc/cascade/handler.go | 18 +++++-- 6 files changed, 96 insertions(+), 46 deletions(-) diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index 4d8ae5f7..251f92c4 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -49,6 +49,7 @@ func (rq *raptorQ) PrepareDecode( logtrace.FieldModule: "rq", logtrace.FieldActionID: actionID, } + logtrace.Info(ctx, "rq: prepare-decode start", fields) // Create root symbols dir for this action symbolsDir := filepath.Join(rq.symbolsBaseDir, actionID) @@ -145,10 +146,7 @@ func (rq *raptorQ) PrepareDecode( return os.RemoveAll(symbolsDir) } - logtrace.Debug(ctx, "prepare decode workspace created", logtrace.Fields{ - "symbols_dir": symbolsDir, - "blocks": len(blockDirs), - }) + logtrace.Info(ctx, "rq: prepare-decode ok", logtrace.Fields{"symbols_dir": symbolsDir, "blocks": len(blockDirs)}) return blockDirs, Write, Cleanup, ws, nil } @@ -164,7 +162,7 @@ func (rq *raptorQ) DecodeFromPrepared( logtrace.FieldModule: "rq", logtrace.FieldActionID: ws.ActionID, } - logtrace.Debug(ctx, "RaptorQ decode (prepared) requested", fields) + logtrace.Info(ctx, "rq: decode-from-prepared start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { @@ -214,7 +212,7 @@ func (rq *raptorQ) DecodeFromPrepared( fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("write layout file: %w", err) } - logtrace.Debug(ctx, "layout.json written (prepared)", fields) + 
logtrace.Info(ctx, "rq: layout written", fields) // Decode to output (idempotent-safe: overwrite on success) outputPath := filepath.Join(ws.SymbolsDir, "output") @@ -224,9 +222,7 @@ func (rq *raptorQ) DecodeFromPrepared( return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err) } - logtrace.Debug(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ - "output_path": outputPath, - }) + logtrace.Info(ctx, "rq: decode-from-prepared ok", logtrace.Fields{"output_path": outputPath}) return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil } @@ -236,7 +232,7 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons logtrace.FieldModule: "rq", logtrace.FieldActionID: req.ActionID, } - logtrace.Debug(ctx, "RaptorQ decode request received", fields) + logtrace.Info(ctx, "rq: decode request", fields) // 1) Validate layout (the check) if len(req.Layout.Blocks) == 0 { @@ -273,7 +269,7 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons return DecodeResponse{}, werr } } - logtrace.Debug(ctx, "symbols persisted via Write()", fields) + logtrace.Info(ctx, "rq: symbols persisted", logtrace.Fields{"count": len(req.Symbols)}) } // 4) Decode using the prepared workspace (functionality) @@ -283,5 +279,6 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons return DecodeResponse{}, derr } success = true + logtrace.Info(ctx, "rq: decode ok", fields) return resp, nil } diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 14bad1d9..d2761bd9 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -15,7 +15,7 @@ const ( rqSymbolSize uint16 = 65535 rqRedundancyFactor uint8 = 6 // Limit RaptorQ processor memory usage to ~2 GiB - rqMaxMemoryMB uint64 = 4 * 1024 // MB + rqMaxMemoryMB uint64 = 8 * 1024 // MB // Concurrency tuned for 2 GiB limit and typical 8+ core CPUs rqConcurrency uint64 = 1 // Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB) @@ -43,6 +43,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons "data-size": req.DataSize, } + logtrace.Info(ctx, "rq: encode start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { return EncodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) @@ -86,7 +87,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons if n := len(encodeResp.Metadata.Blocks); n != 1 { return EncodeResponse{}, fmt.Errorf("raptorq encode produced %d blocks; single-block layout is required", n) } - + logtrace.Info(ctx, "rq: encode ok", logtrace.Fields{"symbols_dir": encodeResp.SymbolsDir}) return encodeResp, nil } @@ -102,6 +103,7 @@ func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, err fields["data-size"] = int(fi.Size()) } + logtrace.Info(ctx, "rq: create-metadata start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { return Layout{}, fmt.Errorf("create RaptorQ processor: %w", err) @@ -147,6 +149,6 @@ func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, err if n := len(layout.Blocks); n != 1 { return Layout{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) } - + logtrace.Info(ctx, "rq: create-metadata ok", logtrace.Fields{"blocks": len(layout.Blocks)}) return layout, nil } diff --git 
a/pkg/lumera/modules/auth/impl.go b/pkg/lumera/modules/auth/impl.go index a3ad3bca..4304e2dd 100644 --- a/pkg/lumera/modules/auth/impl.go +++ b/pkg/lumera/modules/auth/impl.go @@ -45,8 +45,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if err != nil { return fmt.Errorf("invalid address: %w", err) } - - logtrace.Debug(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()}) + logtrace.Info(ctx, "auth: verify signature start", logtrace.Fields{"address": addr.String()}) // Use Account RPC instead of AccountInfo to get the full account with public key accResp, err := m.client.Account(ctx, &authtypes.QueryAccountRequest{ @@ -66,10 +65,10 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if pubKey == nil { return fmt.Errorf("public key is nil") } - logtrace.Debug(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()}) + logtrace.Info(ctx, "auth: public key loaded", logtrace.Fields{"address": addr.String()}) if !pubKey.VerifySignature(data, signature) { return fmt.Errorf("invalid signature") } - + logtrace.Info(ctx, "auth: verify signature ok", logtrace.Fields{"address": addr.String()}) return nil } diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index f8375927..491e2174 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -21,6 +21,11 @@ import ( ) func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + logtrace.Info(ctx, "register: fetch action start", f) res, err := task.LumeraClient.GetAction(ctx, actionID) if err != nil { return nil, task.wrapErr(ctx, "failed to get action", err, f) @@ -28,16 +33,21 @@ func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID s if res.GetAction().ActionID == "" { return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) } - logtrace.Debug(ctx, "action has been retrieved", f) + logtrace.Info(ctx, "register: fetch action ok", f) return res.GetAction(), nil } func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldBlockHeight] = blockHeight + logtrace.Info(ctx, "register: top-supernodes fetch start", f) top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) if err != nil { return task.wrapErr(ctx, "failed to get top SNs", err, f) } - logtrace.Debug(ctx, "Fetched Top Supernodes", f) + logtrace.Info(ctx, "register: top-supernodes fetch ok", f) if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { addresses := make([]string, len(top.Supernodes)) for i, sn := range top.Supernodes { @@ -46,22 +56,38 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses}) return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f) } + logtrace.Info(ctx, "register: top-supernode verified", f) return nil } func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, f logtrace.Fields) (*adaptors.EncodeResult, error) { + if f == nil { + f = 
logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + f["input_path"] = path + logtrace.Info(ctx, "register: encode input start", f) resp, err := task.RQ.EncodeInput(ctx, actionID, path) if err != nil { return nil, task.wrapErr(ctx, "failed to encode data", err, f) } + // Enrich fields with result for subsequent logs + f["symbols_dir"] = resp.SymbolsDir + logtrace.Info(ctx, "register: encode input ok", f) return &resp, nil } func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldCreator] = creator + logtrace.Info(ctx, "register: verify+decode layout start", f) indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) } + logtrace.Info(ctx, "register: index+creatorSig extracted", f) creatorSigBytes, err := base64.StdEncoding.DecodeString(creatorSig) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) @@ -69,11 +95,12 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) } - logtrace.Debug(ctx, "creator signature successfully verified", f) + logtrace.Info(ctx, "register: creator signature verified", f) indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) } + _ = indexFile // keep for potential future detail logs layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) @@ -85,20 +112,30 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } - logtrace.Debug(ctx, "layout signature successfully verified", f) + logtrace.Info(ctx, "register: layout signature verified", f) + logtrace.Info(ctx, "register: verify+decode layout ok", f) return encodedMeta, indexFile.LayoutSignature, nil } func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, sig string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { + if f == nil { + f = logtrace.Fields{} + } + f["rq_ic"] = uint32(meta.RqIdsIc) + f["rq_max"] = uint32(meta.RqIdsMax) + logtrace.Info(ctx, "register: rqid files generation start", f) layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) if err != nil { return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate layout files", err, f) } + logtrace.Info(ctx, "register: layout files generated", logtrace.Fields{"count": len(layoutRes.RedundantMetadataFiles)}) indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) if err != nil { return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate index files", err, f) } allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) + logtrace.Info(ctx, "register: index files generated", logtrace.Fields{"count": len(indexFiles), "rqids": len(indexIDs)}) + logtrace.Info(ctx, "register: rqid files generation ok", logtrace.Fields{"total_files": len(allFiles)}) return cascadekit.GenRQIdentifiersFilesResponse{RQIDs: indexIDs, RedundantMetadataFiles: allFiles}, nil } @@ -115,6 +152,7 @@ func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionI if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{IDFiles: idFiles, SymbolsDir: symbolsDir, TaskID: task.taskID, ActionID: actionID}, f); err != nil { return task.wrapErr(ctx, "failed to store artefacts", err, lf) } + logtrace.Info(ctx, "store: first-pass ok", lf) return nil } @@ -134,11 +172,16 @@ func (task *CascadeRegistrationTask) emitArtefactsStored(ctx context.Context, fi fields = logtrace.Fields{} } msg := "Artefacts stored" - logtrace.Debug(ctx, "artefacts have been stored", fields) + logtrace.Info(ctx, "register: artefacts stored", fields) task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) } func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { + if fields == nil { + fields = logtrace.Fields{} + } + fields["data_bytes"] = dataSize + logtrace.Info(ctx, "register: verify action fee start", fields) dataSizeInKBs := dataSize / 1024 fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) if err != nil { @@ -157,5 +200,6 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action } return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), got), fields) } + logtrace.Info(ctx, "register: verify action fee ok", logtrace.Fields{"required_fee": requiredFee.String()}) return nil } diff --git a/supernode/status/service.go b/supernode/status/service.go index ca58e4ce..0645385f 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go 
@@ -1,9 +1,9 @@ package status import ( - "context" - "fmt" - "time" + "context" + "fmt" + "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/p2p" @@ -126,11 +126,11 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, } - if includeP2PMetrics && s.p2pService != nil { - // Bound P2P metrics collection so status can't hang if P2P is slow - p2pCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) - defer cancel() - p2pStats, err := s.p2pService.Stats(p2pCtx) + if includeP2PMetrics && s.p2pService != nil { + // Bound P2P metrics collection so status can't hang if P2P is slow + p2pCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + p2pStats, err := s.p2pService.Stats(p2pCtx) if err != nil { logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) } else { @@ -212,15 +212,15 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric resp.P2PMetrics = pm } - if s.config != nil && s.lumeraClient != nil { - // Bound chain query for latest address to avoid slow network hangs - chainCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) - defer cancel() - if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(chainCtx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { - resp.IpAddress = supernodeInfo.LatestAddress - } else if err != nil { - logtrace.Error(ctx, "failed to resolve latest supernode address", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } + if s.config != nil && s.lumeraClient != nil { + // Bound chain query for latest address to avoid slow network hangs + chainCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(chainCtx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { + resp.IpAddress = supernodeInfo.LatestAddress + } else if err != nil { + logtrace.Error(ctx, "failed to resolve latest supernode address", logtrace.Fields{logtrace.FieldError: err.Error()}) + } + } return resp, nil } diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go index 10fb4586..96237b98 100644 --- a/supernode/transport/grpc/cascade/handler.go +++ b/supernode/transport/grpc/cascade/handler.go @@ -87,7 +87,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er } ctx := stream.Context() - logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) + logtrace.Info(ctx, "register: stream open", fields) const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit @@ -141,11 +141,16 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er logtrace.Error(ctx, "upload rejected: file too large", fields) return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) } + // Keep chunk logs at debug to avoid verbosity logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize}) } case *pb.RegisterRequest_Metadata: metadata = x.Metadata - logtrace.Debug(ctx, "received metadata", logtrace.Fields{"task_id": metadata.TaskId, "action_id": metadata.ActionId}) + // Set correlation ID for the rest of the flow + ctx = logtrace.CtxWithCorrelationID(ctx, metadata.ActionId) + 
fields[logtrace.FieldTaskID] = metadata.GetTaskId() + fields[logtrace.FieldActionID] = metadata.GetActionId() + logtrace.Info(ctx, "register: metadata received", fields) // Start live task tracking on first metadata (covers remaining stream and processing) if !startedTask { startedTask = true @@ -161,7 +166,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er } fields[logtrace.FieldTaskID] = metadata.GetTaskId() fields[logtrace.FieldActionID] = metadata.GetActionId() - logtrace.Debug(ctx, "metadata received from action-sdk", fields) + logtrace.Info(ctx, "register: stream upload complete", fields) if err := tempFile.Sync(); err != nil { fields[logtrace.FieldError] = err.Error() @@ -172,7 +177,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er hash := hasher.Sum(nil) hashHex := hex.EncodeToString(hash) fields[logtrace.FieldHashHex] = hashHex - logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) + logtrace.Info(ctx, "register: hash computed", fields) targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) if err != nil { @@ -182,6 +187,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er } task := server.factory.NewCascadeRegistrationTask() + logtrace.Info(ctx, "register: task start", fields) err = task.Register(ctx, &cascadeService.RegisterRequest{ TaskID: metadata.TaskId, ActionID: metadata.ActionId, @@ -198,13 +204,15 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()}) return err } + // Mirror event to Info logs for high-level tracing + logtrace.Info(ctx, "register: event", logtrace.Fields{"event_type": resp.EventType, "message": resp.Message, logtrace.FieldTxHash: resp.TxHash, logtrace.FieldActionID: metadata.ActionId, logtrace.FieldTaskID: metadata.TaskId}) return nil }) if err != nil { logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()}) return fmt.Errorf("registration failed: %w", err) } - logtrace.Debug(ctx, "cascade registration completed successfully", fields) + logtrace.Info(ctx, "register: task ok", fields) return nil } From ae0b6c11b276bb767070deffba663fa33ce631ca Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 14 Oct 2025 08:11:35 +0500 Subject: [PATCH 26/36] Migrate github client to main pkg --- {sn-manager/internal => pkg}/github/client.go | 0 sn-manager/cmd/check.go | 2 +- sn-manager/cmd/get.go | 2 +- sn-manager/cmd/init.go | 2 +- sn-manager/cmd/ls-remote.go | 64 +++++++++---------- sn-manager/cmd/start.go | 2 +- sn-manager/internal/updater/updater.go | 4 +- 7 files changed, 38 insertions(+), 38 deletions(-) rename {sn-manager/internal => pkg}/github/client.go (100%) diff --git a/sn-manager/internal/github/client.go b/pkg/github/client.go similarity index 100% rename from sn-manager/internal/github/client.go rename to pkg/github/client.go diff --git a/sn-manager/cmd/check.go b/sn-manager/cmd/check.go index 2e6e971b..4910eb06 100644 --- a/sn-manager/cmd/check.go +++ b/sn-manager/cmd/check.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" 
"github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/get.go b/sn-manager/cmd/get.go index eb8f0fac..7244c10f 100644 --- a/sn-manager/cmd/get.go +++ b/sn-manager/cmd/get.go @@ -6,8 +6,8 @@ import ( "os" "path/filepath" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/init.go b/sn-manager/cmd/init.go index 383d70ad..2eb2639c 100644 --- a/sn-manager/cmd/init.go +++ b/sn-manager/cmd/init.go @@ -8,8 +8,8 @@ import ( "path/filepath" "github.com/AlecAivazis/survey/v2" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/ls-remote.go b/sn-manager/cmd/ls-remote.go index 65619fd1..0d7bdff6 100644 --- a/sn-manager/cmd/ls-remote.go +++ b/sn-manager/cmd/ls-remote.go @@ -3,8 +3,8 @@ package cmd import ( "fmt" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/spf13/cobra" ) @@ -15,37 +15,37 @@ var lsRemoteCmd = &cobra.Command{ } func runLsRemote(cmd *cobra.Command, args []string) error { - client := github.NewClient(config.GitHubRepo) - - releases, err := client.ListReleases() - if err != nil { - return fmt.Errorf("failed to list releases: %w", err) - } - - // Filter to stable (non-draft, non-prerelease) - var stable []*github.Release - for _, r := range releases { - if !r.Draft && !r.Prerelease { - stable = append(stable, r) - } - } - - if len(stable) == 0 { - fmt.Println("No releases found") - return nil - } - - fmt.Println("Available versions:") - for i, release := range stable { - if i == 0 { - fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } else { - fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } - if i >= 9 { - break - } - } + client := github.NewClient(config.GitHubRepo) + + releases, err := client.ListReleases() + if err != nil { + return fmt.Errorf("failed to list releases: %w", err) + } + + // Filter to stable (non-draft, non-prerelease) + var stable []*github.Release + for _, r := range releases { + if !r.Draft && !r.Prerelease { + stable = append(stable, r) + } + } + + if len(stable) == 0 { + fmt.Println("No releases found") + return nil + } + + fmt.Println("Available versions:") + for i, release := range stable { + if i == 0 { + fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } else { + fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } + if i >= 9 { + break + } + } return nil } diff --git a/sn-manager/cmd/start.go b/sn-manager/cmd/start.go index f98ff737..6deb1583 100644 --- a/sn-manager/cmd/start.go +++ b/sn-manager/cmd/start.go @@ -11,8 +11,8 @@ import ( "strings" "syscall" + "github.com/LumeraProtocol/supernode/v2/pkg/github" 
"github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/manager" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index 3fe4fd3f..2e6f9d56 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -12,8 +12,8 @@ import ( "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" @@ -28,7 +28,7 @@ const ( updateCheckInterval = 10 * time.Minute // forceUpdateAfter is the age threshold after a release is published // beyond which updates are applied regardless of normal gates (idle, policy) - forceUpdateAfter = 60 * time.Minute + forceUpdateAfter = 10 * time.Minute ) type AutoUpdater struct { From 4891e2466da2389ad9f33c139d38560cb89443d0 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 14 Oct 2025 09:26:25 +0500 Subject: [PATCH 27/36] Enforce version check in sdk --- sdk/adapters/supernodeservice/adapter.go | 8 +++++- sdk/helpers/github_helper.go | 32 ++++++++++++++++++++++++ sdk/task/task.go | 5 ++-- 3 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 sdk/helpers/github_helper.go diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index 9712915c..3195b694 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -225,7 +225,9 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca bytesRead += int64(n) progress := float64(bytesRead) / float64(totalBytes) * 100 - a.logger.Debug(ctx, "Sent data chunk", "chunkIndex", chunkIndex, "chunkSize", n, "progress", fmt.Sprintf("%.1f%%", progress)) + // Print upload progress directly to stdout + fmt.Printf("Upload progress: task_id=%s action_id=%s chunk_index=%d chunk_size=%d progress=%.1f%% bytes=%d/%d\n", + in.TaskId, in.ActionID, chunkIndex, n, progress, bytesRead, totalBytes) chunkIndex++ } @@ -477,6 +479,10 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( chunkIndex++ a.logger.Debug(ctx, "received chunk", "chunk_index", chunkIndex, "chunk_size", len(data), "bytes_written", bytesWritten) + + // Print download progress directly to stdout (similar to upload progress) + fmt.Printf("Download progress: action_id=%s chunk_index=%d chunk_size=%d bytes=%d\n", + in.ActionID, chunkIndex, len(data), bytesWritten) } } diff --git a/sdk/helpers/github_helper.go b/sdk/helpers/github_helper.go new file mode 100644 index 00000000..edf5eefc --- /dev/null +++ b/sdk/helpers/github_helper.go @@ -0,0 +1,32 @@ +package helpers + +import ( + "strings" + "sync" + + gh "github.com/LumeraProtocol/supernode/v2/pkg/github" +) + +var ( + requiredSupernodeVersion string + requiredVersionOnce sync.Once +) + +// ResolveRequiredSupernodeVersion returns the latest stable SuperNode tag from GitHub. +// The value is fetched once per process and cached. 
If lookup fails, it returns +// an empty string so callers can gracefully skip strict version gating. +func ResolveRequiredSupernodeVersion() string { + requiredVersionOnce.Do(func() { + client := gh.NewClient("LumeraProtocol/supernode") + if client != nil { + if release, err := client.GetLatestStableRelease(); err == nil { + if tag := strings.TrimSpace(release.TagName); tag != "" { + requiredSupernodeVersion = tag + return + } + } + } + requiredSupernodeVersion = "" + }) + return requiredSupernodeVersion +} diff --git a/sdk/task/task.go b/sdk/task/task.go index 088de086..7dd72e8f 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -13,6 +13,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" + "github.com/LumeraProtocol/supernode/v2/sdk/helpers" "github.com/LumeraProtocol/supernode/v2/sdk/log" "github.com/LumeraProtocol/supernode/v2/sdk/net" "google.golang.org/grpc/health/grpc_health_v1" @@ -182,8 +183,8 @@ func (t *BaseTask) fetchSupernodesWithLoads(ctx context.Context, height int64) ( t.logger.Info(cctx, "reject supernode: status fetch failed", "error", err) return nil } - if status.Network.PeersCount <= 1 { - t.logger.Info(cctx, "reject supernode: insufficient peers", "peers_count", status.Network.PeersCount) + if reqVer := helpers.ResolveRequiredSupernodeVersion(); reqVer != "" && status.Version != reqVer { + t.logger.Info(cctx, "reject supernode: version mismatch", "expected", reqVer, "got", status.Version) return nil } From ad822590e90a448b5c44ebabeb6e4714f4cbf714 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 17 Oct 2025 17:22:07 +0500 Subject: [PATCH 28/36] Add cache for supernode info calls in sdk --- sdk/adapters/lumera/adapter.go | 134 +++++++++++++++++++++++++++------ 1 file changed, 112 insertions(+), 22 deletions(-) diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 042c2273..3e21627c 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sort" + "time" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -14,7 +15,18 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + ristretto "github.com/dgraph-io/ristretto/v2" "github.com/golang/protobuf/proto" + "golang.org/x/sync/singleflight" +) + +const ( + // Cache tuning: tiny LFU with TTL to avoid stale long-term entries + cacheNumCounters = 1_000 + cacheMaxCost = 100 + cacheBufferItems = 64 + cacheItemCost = 1 + cacheTTL = time.Hour ) //go:generate mockery --name=Client --output=testutil/mocks --outpkg=mocks --filename=lumera_mock.go @@ -54,6 +66,11 @@ type ConfigParams struct { type Adapter struct { client lumeraclient.Client logger log.Logger + + // Lightweight caches to reduce repeated chain lookups when used as a validator + accountCache *ristretto.Cache[string, *authtypes.QueryAccountInfoResponse] + supernodeCache *ristretto.Cache[string, *sntypes.SuperNode] + sf singleflight.Group } // NewAdapter creates a new Adapter with dependencies explicitly injected @@ -77,31 +94,74 @@ func NewAdapter(ctx context.Context, config ConfigParams, logger log.Logger) (Cl logger.Info(ctx, "Lumera adapter created successfully") + // Initialize small, bounded caches return &Adapter{ - client: client, - logger: logger, + client: client, + logger: logger, + accountCache: 
newStringCache[*authtypes.QueryAccountInfoResponse](), + supernodeCache: newStringCache[*sntypes.SuperNode](), }, nil } +func newStringCache[T any]() *ristretto.Cache[string, T] { + c, _ := ristretto.NewCache(&ristretto.Config[string, T]{ + NumCounters: cacheNumCounters, + MaxCost: cacheMaxCost, + BufferItems: cacheBufferItems, + }) + return c +} + func (a *Adapter) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*sntypes.SuperNode, error) { - a.logger.Debug(ctx, "Getting supernode by address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if address == "" { + return nil, fmt.Errorf("address cannot be empty") + } + // Fast path: cache hit + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok && val != nil { + return val, nil + } + } + + // Deduplicate concurrent lookups for same address + res, err, _ := a.sf.Do("sn:"+address, func() (any, error) { + // Double-check cache inside singleflight + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok && val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting supernode by address", "address", address) + resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if err != nil { + a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) + return nil, fmt.Errorf("failed to get supernode: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for supernode", "address", address) + return nil, fmt.Errorf("received nil response for supernode %s", address) + } + if a.supernodeCache != nil { + a.supernodeCache.SetWithTTL(address, resp, cacheItemCost, cacheTTL) + } + return resp, nil + }) if err != nil { - a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) - return nil, fmt.Errorf("failed to get supernode: %w", err) + return nil, err } - if resp == nil { - a.logger.Error(ctx, "Received nil response for supernode", "address", address) - return nil, fmt.Errorf("received nil response for supernode %s", address) + sn, _ := res.(*sntypes.SuperNode) + if sn == nil { + return nil, fmt.Errorf("supernode is nil") } - a.logger.Debug(ctx, "Successfully retrieved supernode", "address", address) - return resp, nil + return sn, nil } func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) { a.logger.Debug(ctx, "Getting supernode with latest address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + // Route through cached method to avoid duplicate chain calls + resp, err := a.GetSupernodeBySupernodeAddress(ctx, address) if err != nil { a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) return nil, fmt.Errorf("failed to get supernode: %w", err) @@ -147,19 +207,49 @@ func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address str } func (a *Adapter) AccountInfoByAddress(ctx context.Context, addr string) (*authtypes.QueryAccountInfoResponse, error) { - a.logger.Debug(ctx, "Getting account info by address", "address", addr) - resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) - if err != nil { - a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) - return nil, fmt.Errorf("failed to get account info: %w", err) + if addr == "" { + return nil, fmt.Errorf("address cannot be empty") } - if resp == nil { - a.logger.Error(ctx, "Received nil response 
for account info", "address", addr) - return nil, fmt.Errorf("received nil response for account info %s", addr) + // Fast path: cache hit + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != nil { + return val, nil + } } - a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) - return resp, nil + // Deduplicate concurrent fetches + res, err, _ := a.sf.Do("acct:"+addr, func() (any, error) { + // Double-check cache inside singleflight window + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting account info by address", "address", addr) + resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) + if err != nil { + a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) + return nil, fmt.Errorf("failed to get account info: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for account info", "address", addr) + return nil, fmt.Errorf("received nil response for account info %s", addr) + } + if a.accountCache != nil { + a.accountCache.SetWithTTL(addr, resp, cacheItemCost, cacheTTL) + } + a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) + return resp, nil + }) + if err != nil { + return nil, err + } + ai, _ := res.(*authtypes.QueryAccountInfoResponse) + if ai == nil { + return nil, fmt.Errorf("account info is nil") + } + return ai, nil } func (a *Adapter) GetAction(ctx context.Context, actionID string) (Action, error) { From 2938b5cfd0b133d74996b8b95674e763e38ab9ef Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Mon, 20 Oct 2025 15:53:52 +0500 Subject: [PATCH 29/36] Enhancements in cascadekit, supenrode (#205) --- p2p/kademlia/version_gate.go | 10 +- pkg/cascadekit/cascadekit_test.go | 66 +++++++ pkg/cascadekit/doc.go | 2 +- pkg/cascadekit/hash.go | 15 +- pkg/cascadekit/ids.go | 124 ++++++------- pkg/cascadekit/index.go | 25 ++- pkg/cascadekit/index_parse.go | 4 +- pkg/cascadekit/keyring_signatures.go | 14 ++ pkg/cascadekit/metadata.go | 4 +- pkg/cascadekit/parsers.go | 2 +- .../{highlevel.go => request_builder.go} | 13 +- pkg/cascadekit/rqid.go | 56 ++---- pkg/cascadekit/signatures.go | 42 +++-- pkg/cascadekit/verify.go | 8 + pkg/codec/codec.go | 16 +- pkg/codec/codec_default_test.go | 8 +- pkg/codec/raptorq.go | 26 +-- sdk/action/client.go | 26 ++- sdk/helpers/github_helper.go | 5 + sdk/task/helpers.go | 6 +- supernode/adaptors/rq.go | 15 +- supernode/cascade/config.go | 9 - supernode/cascade/helper.go | 88 ++++----- supernode/cascade/register.go | 169 ++++++++++-------- supernode/cascade/service.go | 23 ++- supernode/cmd/start.go | 2 +- tests/system/e2e_cascade_test.go | 76 +++++--- tests/system/go.mod | 4 +- tests/system/go.sum | 5 +- 29 files changed, 483 insertions(+), 380 deletions(-) create mode 100644 pkg/cascadekit/cascadekit_test.go create mode 100644 pkg/cascadekit/keyring_signatures.go rename pkg/cascadekit/{highlevel.go => request_builder.go} (53%) delete mode 100644 supernode/cascade/config.go diff --git a/p2p/kademlia/version_gate.go b/p2p/kademlia/version_gate.go index e9b70239..74c7dc77 100644 --- a/p2p/kademlia/version_gate.go +++ b/p2p/kademlia/version_gate.go @@ -1,6 +1,9 @@ package kademlia -import "strings" +import ( + "os" + "strings" +) var requiredVer string @@ -18,6 +21,11 @@ func requiredVersion() string { // Policy: required and peer must both be non-empty and exactly equal. 
func versionMismatch(peerVersion string) (required string, mismatch bool) { required = requiredVersion() + // Bypass strict gating during integration tests. + // Tests set os.Setenv("INTEGRATION_TEST", "true"). + if os.Getenv("INTEGRATION_TEST") == "true" { + return required, false + } peer := strings.TrimSpace(peerVersion) if required == "" || peer == "" || peer != required { return required, true diff --git a/pkg/cascadekit/cascadekit_test.go b/pkg/cascadekit/cascadekit_test.go new file mode 100644 index 00000000..d3299705 --- /dev/null +++ b/pkg/cascadekit/cascadekit_test.go @@ -0,0 +1,66 @@ +package cascadekit + +import ( + "encoding/base64" + "testing" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/klauspost/compress/zstd" +) + +func TestExtractIndexAndCreatorSig_Strict(t *testing.T) { + // too few parts + if _, _, err := ExtractIndexAndCreatorSig("abc"); err == nil { + t.Fatalf("expected error for single segment") + } + // too many parts + if _, _, err := ExtractIndexAndCreatorSig("a.b.c"); err == nil { + t.Fatalf("expected error for three segments") + } + // exactly two parts + a, b, err := ExtractIndexAndCreatorSig("a.b") + if err != nil || a != "a" || b != "b" { + t.Fatalf("unexpected result: a=%q b=%q err=%v", a, b, err) + } +} + +func TestParseCompressedIndexFile_Strict(t *testing.T) { + idx := IndexFile{LayoutIDs: []string{"L1", "L2"}, LayoutSignature: base64.StdEncoding.EncodeToString([]byte("sig"))} + idxB64, err := EncodeIndexB64(idx) + if err != nil { + t.Fatalf("encode index: %v", err) + } + payload := []byte(idxB64 + "." + base64.StdEncoding.EncodeToString([]byte("sig2")) + ".0") + + enc, _ := zstd.NewWriter(nil) + defer enc.Close() + compressed := enc.EncodeAll(payload, nil) + + got, err := ParseCompressedIndexFile(compressed) + if err != nil { + t.Fatalf("parse compressed index: %v", err) + } + if got.LayoutSignature != idx.LayoutSignature || len(got.LayoutIDs) != 2 { + t.Fatalf("unexpected index decoded: %+v", got) + } + + // malformed: only two segments + compressedBad := enc.EncodeAll([]byte("a.b"), nil) + if _, err := ParseCompressedIndexFile(compressedBad); err == nil { + t.Fatalf("expected error for two segments") + } + // malformed: four segments + compressedBad4 := enc.EncodeAll([]byte("a.b.c.d"), nil) + if _, err := ParseCompressedIndexFile(compressedBad4); err == nil { + t.Fatalf("expected error for four segments") + } +} + +func TestVerifySingleBlock(t *testing.T) { + if err := VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}}}); err != nil { + t.Fatalf("unexpected error for single block: %v", err) + } + if err := VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}, {}}}); err == nil { + t.Fatalf("expected error for multi-block layout") + } +} diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go index 5fa61f7b..326ed87c 100644 --- a/pkg/cascadekit/doc.go +++ b/pkg/cascadekit/doc.go @@ -5,7 +5,7 @@ // Scope: // - Build and sign layout metadata (RaptorQ layout) and index files // - Generate redundant metadata files and index files + their IDs -// - Extract and decode index payloads from the on-chain signatures string +// - Extract and decode index payloads from the on-chain index signature format string // - Compute data hashes for request metadata // - Verify single-block layout consistency (explicit error if more than 1 block) // diff --git a/pkg/cascadekit/hash.go b/pkg/cascadekit/hash.go index 55288123..811f32cf 100644 --- a/pkg/cascadekit/hash.go +++ b/pkg/cascadekit/hash.go @@ -1,26 +1,15 @@ package cascadekit import 
( - "bytes" "encoding/base64" - "io" - "lukechampine.com/blake3" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) -// ComputeBlake3Hash computes a 32-byte Blake3 hash of the given data. -func ComputeBlake3Hash(msg []byte) ([]byte, error) { - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { - return nil, err - } - return hasher.Sum(nil), nil -} - // ComputeBlake3DataHashB64 computes a Blake3 hash of the input and // returns it as a base64-encoded string. func ComputeBlake3DataHashB64(data []byte) (string, error) { - h, err := ComputeBlake3Hash(data) + h, err := utils.Blake3Hash(data) if err != nil { return "", err } diff --git a/pkg/cascadekit/ids.go b/pkg/cascadekit/ids.go index 5c2b404d..bd9540c9 100644 --- a/pkg/cascadekit/ids.go +++ b/pkg/cascadekit/ids.go @@ -2,96 +2,65 @@ package cascadekit import ( "bytes" - "fmt" "strconv" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/btcutil/base58" + "github.com/klauspost/compress/zstd" ) // GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs). -// The ID is base58(blake3(zstd(layout_b64.layout_sig_b64.counter))). -func GenerateLayoutIDs(layoutB64, layoutSigB64 string, ic, max uint32) []string { - layoutWithSig := fmt.Sprintf("%s.%s", layoutB64, layoutSigB64) - layoutIDs := make([]string, max) - - var buffer bytes.Buffer - buffer.Grow(len(layoutWithSig) + 10) - - for i := uint32(0); i < max; i++ { - buffer.Reset() - buffer.WriteString(layoutWithSig) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - - layoutIDs[i] = base58.Encode(hash) - } - - return layoutIDs +// The ID is base58(blake3(zstd(layout_signature_format.counter))). +// layoutSignatureFormat must be: base64(JSON(layout)).layout_signature_base64 +func GenerateLayoutIDs(layoutSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(layoutSignatureFormat), ic, max) } -// GenerateIndexIDs computes IDs for index files from the full signatures string. -func GenerateIndexIDs(signatures string, ic, max uint32) []string { - indexFileIDs := make([]string, max) - - var buffer bytes.Buffer - buffer.Grow(len(signatures) + 10) - - for i := uint32(0); i < max; i++ { - buffer.Reset() - buffer.WriteString(signatures) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - indexFileIDs[i] = base58.Encode(hash) - } - return indexFileIDs +// GenerateIndexIDs computes IDs for index files from the full index signature format string. +func GenerateIndexIDs(indexSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(indexSignatureFormat), ic, max) } // getIDFiles generates ID files by appending a '.' and counter, compressing, // and returning both IDs and compressed payloads. -func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { +// generateIDFiles builds compressed ID files from a base payload and returns +// both their content-addressed IDs and the compressed files themselves. +// For each counter in [ic..ic+max-1], the payload is: +// +// base + '.' 
+ counter +// +// then zstd-compressed; the ID is base58(blake3(compressed)). +func generateIDFiles(base []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { idFiles := make([][]byte, 0, max) ids = make([]string, 0, max) var buffer bytes.Buffer + // Reuse a single zstd encoder across iterations + enc, zerr := zstd.NewWriter(nil) + if zerr != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", zerr) + } + defer enc.Close() + for i := uint32(0); i < max; i++ { buffer.Reset() counter := ic + i - buffer.Write(file) + buffer.Write(base) buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) + // Append counter efficiently without intermediate string + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(counter), 10) + buffer.Write(cnt) - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } + compressedData := enc.EncodeAll(buffer.Bytes(), nil) idFiles = append(idFiles, compressedData) hash, err := utils.Blake3Hash(compressedData) if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + return ids, idFiles, errors.Errorf("blake3 hash error getting an id file: %w", err) } ids = append(ids, base58.Encode(hash)) @@ -99,3 +68,36 @@ func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byt return ids, idFiles, nil } + +// generateIDs computes base58(blake3(zstd(base + '.' + counter))) for counters ic..ic+max-1. +// It reuses a single zstd encoder and avoids per-iteration heap churn. +func generateIDs(base []byte, ic, max uint32) ([]string, error) { + ids := make([]string, max) + + var buffer bytes.Buffer + // Reserve base length + dot + up to 10 digits + buffer.Grow(len(base) + 12) + + enc, err := zstd.NewWriter(nil) + if err != nil { + return nil, errors.Errorf("zstd encoder init: %w", err) + } + defer enc.Close() + + for i := uint32(0); i < max; i++ { + buffer.Reset() + buffer.Write(base) + buffer.WriteByte(SeparatorByte) + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(ic+i), 10) + buffer.Write(cnt) + + compressed := enc.EncodeAll(buffer.Bytes(), nil) + h, err := utils.Blake3Hash(compressed) + if err != nil { + return nil, errors.Errorf("blake3 hash (i=%d): %w", i, err) + } + ids[i] = base58.Encode(h) + } + return ids, nil +} diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go index e0cb3dce..456b365f 100644 --- a/pkg/cascadekit/index.go +++ b/pkg/cascadekit/index.go @@ -24,13 +24,13 @@ func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} } -// EncodeIndexB64 marshals an index file and returns both the raw JSON and base64. -func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) { - raw, err = json.Marshal(idx) +// EncodeIndexB64 marshals an index file and returns its base64-encoded JSON. +func EncodeIndexB64(idx IndexFile) (string, error) { + raw, err := json.Marshal(idx) if err != nil { - return "", nil, errors.Errorf("marshal index file: %w", err) + return "", errors.Errorf("marshal index file: %w", err) } - return base64.StdEncoding.EncodeToString(raw), raw, nil + return base64.StdEncoding.EncodeToString(raw), nil } // DecodeIndexB64 decodes base64(JSON(IndexFile)). 
@@ -46,17 +46,12 @@ func DecodeIndexB64(data string) (IndexFile, error) { return indexFile, nil } -// ExtractIndexAndCreatorSig splits a signatures string formatted as: +// ExtractIndexAndCreatorSig splits a signature-format string formatted as: // Base64(index_json).Base64(creator_signature) -func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) { - parts := strings.Split(signatures, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") +func ExtractIndexAndCreatorSig(indexSignatureFormat string) (indexB64 string, creatorSigB64 string, err error) { + parts := strings.Split(indexSignatureFormat, ".") + if len(parts) != 2 { + return "", "", errors.New("invalid index signature format: expected 2 segments (index_b64.creator_sig_b64)") } return parts[0], parts[1], nil } - -// MakeSignatureFormat composes the final signatures string. -func MakeSignatureFormat(indexB64, creatorSigB64 string) string { - return indexB64 + "." + creatorSigB64 -} diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go index 0fbf3dca..342728d6 100644 --- a/pkg/cascadekit/index_parse.go +++ b/pkg/cascadekit/index_parse.go @@ -15,8 +15,8 @@ func ParseCompressedIndexFile(data []byte) (IndexFile, error) { return IndexFile{}, errors.Errorf("decompress index file: %w", err) } parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") + if len(parts) != 3 { + return IndexFile{}, errors.New("invalid index file format: expected 3 parts (index_b64.creator_sig_b64.counter)") } return DecodeIndexB64(string(parts[0])) } diff --git a/pkg/cascadekit/keyring_signatures.go b/pkg/cascadekit/keyring_signatures.go new file mode 100644 index 00000000..968af4b5 --- /dev/null +++ b/pkg/cascadekit/keyring_signatures.go @@ -0,0 +1,14 @@ +package cascadekit + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. +// These helpers centralize keyring-backed signing for clarity. +func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { + signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } + return CreateSignatures(layout, signer, ic, max) +} diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go index 534ef793..a77ddfd4 100644 --- a/pkg/cascadekit/metadata.go +++ b/pkg/cascadekit/metadata.go @@ -6,12 +6,12 @@ import ( // NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. // The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
-func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata { +func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool) actiontypes.CascadeMetadata { return actiontypes.CascadeMetadata{ DataHash: dataHashB64, FileName: fileName, RqIdsIc: rqIdsIc, - Signatures: signatures, + Signatures: indexSignatureFormat, Public: public, } } diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go index be950e4f..eb90dde0 100644 --- a/pkg/cascadekit/parsers.go +++ b/pkg/cascadekit/parsers.go @@ -2,11 +2,11 @@ package cascadekit import ( "bytes" + "encoding/json" "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/utils" - json "github.com/json-iterator/go" ) // ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. diff --git a/pkg/cascadekit/highlevel.go b/pkg/cascadekit/request_builder.go similarity index 53% rename from pkg/cascadekit/highlevel.go rename to pkg/cascadekit/request_builder.go index 16c0072d..695e2fdf 100644 --- a/pkg/cascadekit/highlevel.go +++ b/pkg/cascadekit/request_builder.go @@ -3,28 +3,21 @@ package cascadekit import ( actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/codec" - keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" ) -// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. -func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { - signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } - return CreateSignatures(layout, signer, ic, max) -} - // BuildCascadeRequest builds a Cascade request metadata from layout and file bytes. -// It computes blake3(data) base64, creates the signatures string and index IDs, +// It computes blake3(data) base64, creates the index signature format and index IDs, // and returns a CascadeMetadata ready for RequestAction. func BuildCascadeRequest(layout codec.Layout, fileBytes []byte, fileName string, kr cosmoskeyring.Keyring, keyName string, ic, max uint32, public bool) (actiontypes.CascadeMetadata, []string, error) { dataHashB64, err := ComputeBlake3DataHashB64(fileBytes) if err != nil { return actiontypes.CascadeMetadata{}, nil, err } - signatures, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) + indexSignatureFormat, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, nil, err } - meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) return meta, indexIDs, nil } diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go index 3a05eb94..8f6a85aa 100644 --- a/pkg/cascadekit/rqid.go +++ b/pkg/cascadekit/rqid.go @@ -1,63 +1,27 @@ package cascadekit import ( - "context" - "encoding/json" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) -// GenRQIdentifiersFilesResponse groups the generated files and their IDs. 
-type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files generated from the Metadata file - RedundantMetadataFiles [][]byte -} - -// GenerateLayoutFiles builds redundant metadata files from layout and signature. +// GenerateLayoutFilesFromB64 builds redundant metadata files using a precomputed +// base64(JSON(layout)) and the layout signature, avoiding an extra JSON marshal. // The content is: base64(JSON(layout)).layout_signature -func GenerateLayoutFiles(ctx context.Context, layout codec.Layout, layoutSigB64 string, ic uint32, max uint32) (GenRQIdentifiersFilesResponse, error) { - // Validate single-block to match package invariant - if len(layout.Blocks) != 1 { - return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") - } - - metadataFile, err := jsonMarshal(layout) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) - } - b64Encoded := utils.B64Encode(metadataFile) - - // Compose: base64(JSON(layout)).layout_signature - enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) - enc = append(enc, b64Encoded...) +func GenerateLayoutFilesFromB64(layoutB64 []byte, layoutSigB64 string, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + enc := make([]byte, 0, len(layoutB64)+1+len(layoutSigB64)) + enc = append(enc, layoutB64...) enc = append(enc, SeparatorByte) enc = append(enc, []byte(layoutSigB64)...) - - ids, files, err := getIDFiles(enc, ic, max) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: files, - RQIDs: ids, - }, nil + return generateIDFiles(enc, ic, max) } -// GenerateIndexFiles generates index files and their IDs from the full signatures format. -func GenerateIndexFiles(ctx context.Context, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Use the full signatures format that matches what was sent during RequestAction +// GenerateIndexFiles generates index files and their IDs from the full index signature format. +func GenerateIndexFiles(indexSignatureFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { + // Use the full index signature format that matches what was sent during RequestAction // The chain expects this exact format for ID generation - indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) + indexIDs, indexFiles, err = generateIDFiles([]byte(indexSignatureFormat), ic, max) if err != nil { return nil, nil, errors.Errorf("get index ID files: %w", err) } return indexIDs, indexFiles, nil } - -// jsonMarshal marshals a value to JSON. -func jsonMarshal(v interface{}) ([]byte, error) { return json.Marshal(v) } diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go index 0c71e492..b8a02da9 100644 --- a/pkg/cascadekit/signatures.go +++ b/pkg/cascadekit/signatures.go @@ -33,35 +33,53 @@ func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layout return layoutB64, layoutSigB64, nil } -// CreateSignatures reproduces the cascade signature format and index IDs: +// SignIndexB64 marshals the index to JSON, base64-encodes it, and signs the +// base64 payload, returning both the index base64 and creator-signature base64. 
+func SignIndexB64(idx IndexFile, signer Signer) (indexB64 string, creatorSigB64 string, err error) { + raw, err := json.Marshal(idx) + if err != nil { + return "", "", errors.Errorf("marshal index file: %w", err) + } + indexB64 = base64.StdEncoding.EncodeToString(raw) + + sig, err := signer([]byte(indexB64)) + if err != nil { + return "", "", errors.Errorf("sign index: %w", err) + } + creatorSigB64 = base64.StdEncoding.EncodeToString(sig) + return indexB64, creatorSigB64, nil +} + +// CreateSignatures produces the index signature format and index IDs: // // Base64(index_json).Base64(creator_signature) // // It validates the layout has exactly one block. -func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (signatures string, indexIDs []string, err error) { +func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (indexSignatureFormat string, indexIDs []string, err error) { layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) if err != nil { return "", nil, err } // Generate layout IDs (not returned; used to populate the index file) - layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) + layoutSignatureFormat := layoutB64 + "." + layoutSigB64 + layoutIDs, err := GenerateLayoutIDs(layoutSignatureFormat, ic, max) + if err != nil { + return "", nil, err + } // Build and sign the index file idx := BuildIndex(layoutIDs, layoutSigB64) - indexB64, _, err := EncodeIndexB64(idx) + indexB64, creatorSigB64, err := SignIndexB64(idx, signer) if err != nil { return "", nil, err } + indexSignatureFormat = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - creatorSig, err := signer([]byte(indexB64)) + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs, err = GenerateIndexIDs(indexSignatureFormat, ic, max) if err != nil { - return "", nil, errors.Errorf("sign index: %w", err) + return "", nil, err } - creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) - signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - - // Generate the index IDs (these are the RQIDs sent to chain) - indexIDs = GenerateIndexIDs(signatures, ic, max) - return signatures, indexIDs, nil + return indexSignatureFormat, indexIDs, nil } diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go index 5c4ff8a4..74331dde 100644 --- a/pkg/cascadekit/verify.go +++ b/pkg/cascadekit/verify.go @@ -20,3 +20,11 @@ func VerifySingleBlockIDs(ticket, local codec.Layout) error { } return nil } + +// VerifySingleBlock checks that a layout contains exactly one block. +func VerifySingleBlock(layout codec.Layout) error { + if len(layout.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + return nil +} diff --git a/pkg/codec/codec.go b/pkg/codec/codec.go index e9a88a1f..73c31a2a 100644 --- a/pkg/codec/codec.go +++ b/pkg/codec/codec.go @@ -4,9 +4,10 @@ import ( "context" ) -// EncodeResponse represents the response of the encode request. +// EncodeResponse represents the response of the encode request. +// Layout contains the single-block layout produced by the encoder. type EncodeResponse struct { - Metadata Layout + Layout Layout SymbolsDir string } @@ -30,13 +31,20 @@ type EncodeRequest struct { Path string DataSize int } +type CreateMetadataRequest struct { + Path string +} + +// CreateMetadataResponse returns the Layout. +type CreateMetadataResponse struct { + Layout Layout +} // RaptorQ contains methods for request services from RaptorQ service. 
type Codec interface { // Encode a file Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) - // CreateMetadata builds the single-block layout metadata for the given file // without generating RaptorQ symbols. - CreateMetadata(ctx context.Context, path string) (Layout, error) + CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) } diff --git a/pkg/codec/codec_default_test.go b/pkg/codec/codec_default_test.go index a55e605d..79b97bd1 100644 --- a/pkg/codec/codec_default_test.go +++ b/pkg/codec/codec_default_test.go @@ -34,7 +34,7 @@ func TestEncode_ToDirA(t *testing.T) { t.Logf("encoded to: %s", resp.SymbolsDir) // Log theoretical minimum percentage of symbols needed per block - for _, b := range resp.Metadata.Blocks { + for _, b := range resp.Layout.Blocks { s := int64(rqSymbolSize) if s <= 0 { s = 65535 @@ -131,15 +131,15 @@ func TestCreateMetadata_SaveToFile(t *testing.T) { c := NewRaptorQCodec(BaseDir) // Create metadata using the codec and write it next to the input file. - layout, err := c.CreateMetadata(ctx, InputPath) + resp, err := c.CreateMetadata(ctx, CreateMetadataRequest{Path: InputPath}) if err != nil { t.Fatalf("create metadata: %v", err) } - data, err := json.MarshalIndent(layout, "", " ") + data, err := json.MarshalIndent(resp.Layout, "", " ") if err != nil { t.Fatalf("marshal metadata: %v", err) } - outPath := " . " + ".layout.json" + outPath := InputPath + ".layout.json" if err := os.WriteFile(outPath, data, 0o644); err != nil { t.Fatalf("write output: %v", err) } diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index d2761bd9..487f92d8 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -78,13 +78,13 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons } var encodeResp EncodeResponse - if err := json.Unmarshal(layoutData, &encodeResp.Metadata); err != nil { + if err := json.Unmarshal(layoutData, &encodeResp.Layout); err != nil { return EncodeResponse{}, fmt.Errorf("unmarshal layout: %w", err) } encodeResp.SymbolsDir = symbolsDir // Enforce single-block output; abort if multiple blocks are produced - if n := len(encodeResp.Metadata.Blocks); n != 1 { + if n := len(encodeResp.Layout.Blocks); n != 1 { return EncodeResponse{}, fmt.Errorf("raptorq encode produced %d blocks; single-block layout is required", n) } logtrace.Info(ctx, "rq: encode ok", logtrace.Fields{"symbols_dir": encodeResp.SymbolsDir}) @@ -92,21 +92,21 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons } // CreateMetadata builds only the layout metadata for the given file without generating symbols. 
-func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, error) { +func (rq *raptorQ) CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) { // Populate fields; include data-size by stat-ing the file to preserve existing log fields fields := logtrace.Fields{ logtrace.FieldMethod: "CreateMetadata", logtrace.FieldModule: "rq", - "path": path, + "path": req.Path, } - if fi, err := os.Stat(path); err == nil { + if fi, err := os.Stat(req.Path); err == nil { fields["data-size"] = int(fi.Size()) } logtrace.Info(ctx, "rq: create-metadata start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { - return Layout{}, fmt.Errorf("create RaptorQ processor: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) } defer processor.Free() logtrace.Debug(ctx, "RaptorQ processor created", fields) @@ -122,33 +122,33 @@ func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, err tmpDir, err := os.MkdirTemp(base, "rq_meta_*") if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("mkdir temp dir: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("mkdir temp dir: %w", err) } defer os.RemoveAll(tmpDir) layoutPath := filepath.Join(tmpDir, "layout.json") // Use rq-go's metadata-only creation; no symbols are produced here. - resp, err := processor.CreateMetadata(path, layoutPath, blockSize) + resp, err := processor.CreateMetadata(req.Path, layoutPath, blockSize) if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("raptorq create metadata: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("raptorq create metadata: %w", err) } layoutData, err := os.ReadFile(resp.LayoutFilePath) if err != nil { fields[logtrace.FieldError] = err.Error() - return Layout{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) + return CreateMetadataResponse{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) } var layout Layout if err := json.Unmarshal(layoutData, &layout); err != nil { - return Layout{}, fmt.Errorf("unmarshal layout: %w", err) + return CreateMetadataResponse{}, fmt.Errorf("unmarshal layout: %w", err) } // Enforce single-block output; abort if multiple blocks are produced if n := len(layout.Blocks); n != 1 { - return Layout{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) + return CreateMetadataResponse{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) } logtrace.Info(ctx, "rq: create-metadata ok", logtrace.Fields{"blocks": len(layout.Blocks)}) - return layout, nil + return CreateMetadataResponse{Layout: layout}, nil } diff --git a/sdk/action/client.go b/sdk/action/client.go index 82ffa052..af7bca07 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -23,6 +23,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/codec" keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) @@ -249,17 +250,14 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("stat file: %w", err) } - data, err := os.ReadFile(filePath) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", 
fmt.Errorf("read file: %w", err) - } // Build layout metadata only (no symbols). Supernodes will create symbols. rq := codec.NewRaptorQCodec("") - layout, err := rq.CreateMetadata(ctx, filePath) + metaResp, err := rq.CreateMetadata(ctx, codec.CreateMetadataRequest{Path: filePath}) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) } + layout := metaResp.Layout // Derive `max` from chain params, then create signatures and index IDs paramsResp, err := c.lumeraClient.GetActionParams(ctx) @@ -277,22 +275,24 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // Pick a random initial counter in [1,100] rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) ic := uint32(rnd.Int64() + 1) // 1..100 - signatures, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) + // Create signatures from the layout struct + indexSignatureFormat, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) } - // Compute data hash (blake3) as base64 - dataHashB64, err := cascadekit.ComputeBlake3DataHashB64(data) + // Compute data hash (blake3) as base64 using a streaming file hash to avoid loading entire file + h, err := utils.ComputeHashOfFile(filePath) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("hash data: %w", err) } + dataHashB64 := base64.StdEncoding.EncodeToString(h) // Derive file name from path fileName := filepath.Base(filePath) // Build metadata proto - meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) // Fetch params (already fetched) to get denom and expiration duration denom := paramsResp.Params.BaseActionFee.Denom @@ -316,15 +316,11 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key. // Returns base64-encoded signature suitable for StartCascade. func (c *ClientImpl) GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) { - data, err := os.ReadFile(filePath) - if err != nil { - return "", fmt.Errorf("read file: %w", err) - } - hash, err := cascadekit.ComputeBlake3Hash(data) + h, err := utils.ComputeHashOfFile(filePath) if err != nil { return "", fmt.Errorf("blake3: %w", err) } - sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, hash) + sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, h) if err != nil { return "", fmt.Errorf("sign hash: %w", err) } diff --git a/sdk/helpers/github_helper.go b/sdk/helpers/github_helper.go index edf5eefc..0c028c55 100644 --- a/sdk/helpers/github_helper.go +++ b/sdk/helpers/github_helper.go @@ -1,6 +1,7 @@ package helpers import ( + "os" "strings" "sync" @@ -16,6 +17,10 @@ var ( // The value is fetched once per process and cached. If lookup fails, it returns // an empty string so callers can gracefully skip strict version gating. func ResolveRequiredSupernodeVersion() string { + // Bypass strict version gating during integration tests. 
+ if os.Getenv("INTEGRATION_TEST") == "true" { + return "" + } requiredVersionOnce.Do(func() { client := gh.NewClient("LumeraProtocol/supernode") if client != nil { diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index 2d2b7391..1612f12d 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -10,7 +10,7 @@ import ( "sort" "strings" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" ) @@ -145,7 +145,7 @@ func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) return sns } // Precompute seed hash (blake3) - seedHash, err := cascadekit.ComputeBlake3Hash([]byte(seed)) + seedHash, err := utils.Blake3Hash([]byte(seed)) if err != nil { return sns } @@ -160,7 +160,7 @@ func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) if id == "" { id = sn.GrpcEndpoint } - nHash, err := cascadekit.ComputeBlake3Hash([]byte(id)) + nHash, err := utils.Blake3Hash([]byte(id)) if err != nil { nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetInt64(0)}) continue diff --git a/supernode/adaptors/rq.go b/supernode/adaptors/rq.go index 5586edf8..b8efa1dd 100644 --- a/supernode/adaptors/rq.go +++ b/supernode/adaptors/rq.go @@ -2,19 +2,20 @@ package adaptors import ( "context" + "os" "github.com/LumeraProtocol/supernode/v2/pkg/codec" ) // CodecService wraps codec operations used by cascade type CodecService interface { - EncodeInput(ctx context.Context, actionID string, path string) (EncodeResult, error) + EncodeInput(ctx context.Context, actionID string, filePath string) (EncodeResult, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) } type EncodeResult struct { SymbolsDir string - Metadata codec.Layout + Layout codec.Layout } type DecodeRequest struct { @@ -32,12 +33,16 @@ type codecImpl struct{ codec codec.Codec } func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } -func (c *codecImpl) EncodeInput(ctx context.Context, actionID, path string) (EncodeResult, error) { - res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: path}) +func (c *codecImpl) EncodeInput(ctx context.Context, actionID, filePath string) (EncodeResult, error) { + var size int + if fi, err := os.Stat(filePath); err == nil { + size = int(fi.Size()) + } + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: filePath, DataSize: size}) if err != nil { return EncodeResult{}, err } - return EncodeResult{SymbolsDir: res.SymbolsDir, Metadata: res.Metadata}, nil + return EncodeResult{SymbolsDir: res.SymbolsDir, Layout: res.Layout}, nil } func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) { diff --git a/supernode/cascade/config.go b/supernode/cascade/config.go deleted file mode 100644 index bb32ca13..00000000 --- a/supernode/cascade/config.go +++ /dev/null @@ -1,9 +0,0 @@ -package cascade - -// Config contains settings for the cascade service -type Config struct { - // SupernodeAccountAddress is the on-chain account address of this supernode. 
-	SupernodeAccountAddress string `mapstructure:"-" json:"-"`
-
-	RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"`
-}
diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go
index 491e2174..a2006354 100644
--- a/supernode/cascade/helper.go
+++ b/supernode/cascade/helper.go
@@ -48,95 +48,96 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b
 		return task.wrapErr(ctx, "failed to get top SNs", err, f)
 	}
 	logtrace.Info(ctx, "register: top-supernodes fetch ok", f)
-	if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) {
+	if !supernode.Exists(top.Supernodes, task.SupernodeAccountAddress) {
 		addresses := make([]string, len(top.Supernodes))
 		for i, sn := range top.Supernodes {
 			addresses[i] = sn.SupernodeAccount
 		}
-		logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses})
-		return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f)
+		logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.SupernodeAccountAddress, "topSupernodes": addresses})
+		return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top supernodes: %v", task.SupernodeAccountAddress, addresses), f)
 	}
 	logtrace.Info(ctx, "register: top-supernode verified", f)
 	return nil
 }
 
-func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, f logtrace.Fields) (*adaptors.EncodeResult, error) {
+func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, filePath string, f logtrace.Fields) (*adaptors.EncodeResult, error) {
 	if f == nil {
 		f = logtrace.Fields{}
 	}
 	f[logtrace.FieldActionID] = actionID
-	f["input_path"] = path
+	f["file_path"] = filePath
 	logtrace.Info(ctx, "register: encode input start", f)
-	resp, err := task.RQ.EncodeInput(ctx, actionID, path)
+	res, err := task.RQ.EncodeInput(ctx, actionID, filePath)
 	if err != nil {
 		return nil, task.wrapErr(ctx, "failed to encode data", err, f)
 	}
 	// Enrich fields with result for subsequent logs
-	f["symbols_dir"] = resp.SymbolsDir
+	f["symbols_dir"] = res.SymbolsDir
 	logtrace.Info(ctx, "register: encode input ok", f)
-	return &resp, nil
+	return &res, nil
 }
 
-func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) {
-	if f == nil {
-		f = logtrace.Fields{}
-	}
-	f[logtrace.FieldCreator] = creator
-	logtrace.Info(ctx, "register: verify+decode layout start", f)
-	indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded)
+// validateIndexAndLayout verifies:
+// - creator signature over the index payload (index_b64)
+// - layout signature over base64(JSON(layout))
+// Returns the decoded index and layoutB64. No logging here; callers handle it.
+func (task *CascadeRegistrationTask) validateIndexAndLayout(ctx context.Context, creator string, indexSignatureFormat string, layout codec.Layout) (cascadekit.IndexFile, []byte, error) { + // Extract and verify creator signature on index + indexB64, creatorSigB64, err := cascadekit.ExtractIndexAndCreatorSig(indexSignatureFormat) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) + return cascadekit.IndexFile{}, nil, err } - logtrace.Info(ctx, "register: index+creatorSig extracted", f) - creatorSigBytes, err := base64.StdEncoding.DecodeString(creatorSig) + creatorSig, err := base64.StdEncoding.DecodeString(creatorSigB64) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) + return cascadekit.IndexFile{}, nil, err } - if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) + if err := task.LumeraClient.Verify(ctx, creator, []byte(indexB64), creatorSig); err != nil { + return cascadekit.IndexFile{}, nil, err } - logtrace.Info(ctx, "register: creator signature verified", f) - indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) + // Decode index + indexFile, err := cascadekit.DecodeIndexB64(indexB64) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) + return cascadekit.IndexFile{}, nil, err } - _ = indexFile // keep for potential future detail logs - layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) + // Build layoutB64 and verify single-block + signature + layoutB64, err := cascadekit.LayoutB64(layout) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) + return cascadekit.IndexFile{}, nil, err } - layoutB64, err := cascadekit.LayoutB64(encodedMeta) + if err := cascadekit.VerifySingleBlock(layout); err != nil { + return cascadekit.IndexFile{}, nil, err + } + layoutSig, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to build layout base64", err, f) + return cascadekit.IndexFile{}, nil, err } - if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) + if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSig); err != nil { + return cascadekit.IndexFile{}, nil, err } - logtrace.Info(ctx, "register: layout signature verified", f) - logtrace.Info(ctx, "register: verify+decode layout ok", f) - return encodedMeta, indexFile.LayoutSignature, nil + return indexFile, layoutB64, nil } -func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, sig string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { +func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, layoutSigB64 string, layoutB64 []byte, f logtrace.Fields) ([]string, [][]byte, error) { if f == nil { f = logtrace.Fields{} } f["rq_ic"] = uint32(meta.RqIdsIc) f["rq_max"] = uint32(meta.RqIdsMax) logtrace.Info(ctx, "register: rqid files generation start", f) - layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, 
uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + + layoutIDs, layoutFiles, err := cascadekit.GenerateLayoutFilesFromB64(layoutB64, layoutSigB64, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate layout files", err, f) + return nil, nil, task.wrapErr(ctx, "failed to generate layout files", err, f) } - logtrace.Info(ctx, "register: layout files generated", logtrace.Fields{"count": len(layoutRes.RedundantMetadataFiles)}) - indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + logtrace.Info(ctx, "register: layout files generated", logtrace.Fields{"count": len(layoutFiles), "layout_ids": len(layoutIDs)}) + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, task.wrapErr(ctx, "failed to generate index files", err, f) + return nil, nil, task.wrapErr(ctx, "failed to generate index files", err, f) } - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) + allFiles := append(layoutFiles, indexFiles...) logtrace.Info(ctx, "register: index files generated", logtrace.Fields{"count": len(indexFiles), "rqids": len(indexIDs)}) logtrace.Info(ctx, "register: rqid files generation ok", logtrace.Fields{"total_files": len(allFiles)}) - return cascadekit.GenRQIdentifiersFilesResponse{RQIDs: indexIDs, RedundantMetadataFiles: allFiles}, nil + return indexIDs, allFiles, nil } func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { @@ -182,7 +183,8 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action } fields["data_bytes"] = dataSize logtrace.Info(ctx, "register: verify action fee start", fields) - dataSizeInKBs := dataSize / 1024 + // Round up to the nearest KB to avoid underestimating required fee + dataSizeInKBs := (dataSize + 1023) / 1024 fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) if err != nil { return task.wrapErr(ctx, "failed to get action fee", err, fields) diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go index 2fe2623d..a9b44117 100644 --- a/supernode/cascade/register.go +++ b/supernode/cascade/register.go @@ -25,125 +25,138 @@ type RegisterResponse struct { } func (task *CascadeRegistrationTask) Register( - ctx context.Context, - req *RegisterRequest, - send func(resp *RegisterResponse) error, + ctx context.Context, + req *RegisterRequest, + send func(resp *RegisterResponse) error, ) (err error) { - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - task.taskID = req.TaskID - } - - fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "register: request", fields) - defer func() { - if req != nil && req.FilePath != "" { - if remErr := os.RemoveAll(req.FilePath); remErr != nil { - logtrace.Warn(ctx, "Failed to remove uploaded file", fields) - } else { - logtrace.Debug(ctx, "Uploaded file cleaned up", fields) - } - } - }() - - action, err := task.fetchAction(ctx, req.ActionID, fields) - if err != nil { - return err - } - fields[logtrace.FieldBlockHeight] = action.BlockHeight + // Step 1: Correlate context and capture task identity + if req != nil && 
req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID + } + + // Step 2: Log request and ensure uploaded file cleanup + fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} + logtrace.Info(ctx, "register: request", fields) + defer func() { + if req != nil && req.FilePath != "" { + if remErr := os.RemoveAll(req.FilePath); remErr != nil { + logtrace.Warn(ctx, "Failed to remove uploaded file", fields) + } else { + logtrace.Debug(ctx, "Uploaded file cleaned up", fields) + } + } + }() + + // Step 3: Fetch the action details + action, err := task.fetchAction(ctx, req.ActionID, fields) + if err != nil { + return err + } + fields[logtrace.FieldBlockHeight] = action.BlockHeight fields[logtrace.FieldCreator] = action.Creator fields[logtrace.FieldStatus] = action.State fields[logtrace.FieldPrice] = action.Price logtrace.Info(ctx, "register: action fetched", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { - return err - } + // Step 4: Verify action fee based on data size (rounded up to KB) + if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { + return err + } logtrace.Info(ctx, "register: fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress - if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { - return err - } + // Step 5: Ensure this node is eligible (top supernode for block) + fields[logtrace.FieldSupernodeState] = task.SupernodeAccountAddress + if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { + return err + } logtrace.Info(ctx, "register: top supernode confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) - if err != nil { - return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) - } + // Step 6: Decode Cascade metadata from the action + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) + if err != nil { + return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) + } logtrace.Info(ctx, "register: metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { - return err - } + // Step 7: Verify request-provided data hash matches metadata + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { + return err + } logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) - if err != nil { - return err - } - fields["symbols_dir"] = encResp.SymbolsDir + // Step 8: Encode input using the RQ codec to produce layout and symbols + encodeResult, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) + if err != nil { + return err + } + fields["symbols_dir"] = 
encodeResult.SymbolsDir logtrace.Info(ctx, "register: input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - layout, signature, err := task.verifySignatureAndDecodeLayout(ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields) - if err != nil { - return err - } - logtrace.Info(ctx, "register: signature verified", fields) - task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, encResp.Metadata, fields) - if err != nil { - return err - } + // Step 9: Verify index and layout signatures; produce layoutB64 + logtrace.Info(ctx, "register: verify+decode layout start", fields) + indexFile, layoutB64, vErr := task.validateIndexAndLayout(ctx, action.Creator, cascadeMeta.Signatures, encodeResult.Layout) + if vErr != nil { + return task.wrapErr(ctx, "signature or index validation failed", vErr, fields) + } + layoutSignatureB64 := indexFile.LayoutSignature + logtrace.Info(ctx, "register: signature verified", fields) + task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) + + // Step 10: Generate RQID files (layout and index) and compute IDs + rqIDs, idFiles, err := task.generateRQIDFiles(ctx, cascadeMeta, layoutSignatureB64, layoutB64, fields) + if err != nil { + return err + } // Calculate combined size of all index and layout files totalSize := 0 - for _, file := range rqidResp.RedundantMetadataFiles { + for _, file := range idFiles { totalSize += len(file) } - fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) + fields["id_files_count"] = len(idFiles) + fields["rqids_count"] = len(rqIDs) fields["combined_files_size_bytes"] = totalSize fields["combined_files_size_kb"] = float64(totalSize) / 1024 fields["combined_files_size_mb"] = float64(totalSize) / (1024 * 1024) logtrace.Info(ctx, "register: rqid files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) - if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { - return task.wrapErr(ctx, "failed to verify IDs", err, fields) - } logtrace.Info(ctx, "register: rqids validated", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize simulation failed", fields) - task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } + // Step 11: Simulate finalize to ensure the tx will succeed + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize simulation failed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } logtrace.Info(ctx, "register: finalize simulation passed", fields) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { - return err - } - task.emitArtefactsStored(ctx, fields, encResp.Metadata, 
send) - - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize action error", fields) - return task.wrapErr(ctx, "failed to finalize action", err, fields) - } + // Step 12: Store artefacts to the network store + if err := task.storeArtefacts(ctx, action.ActionID, idFiles, encodeResult.SymbolsDir, fields); err != nil { + return err + } + task.emitArtefactsStored(ctx, fields, encodeResult.Layout, send) + + // Step 13: Finalize the action on-chain + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqIDs) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize action error", fields) + return task.wrapErr(ctx, "failed to finalize action", err, fields) + } txHash := resp.TxResponse.TxHash fields[logtrace.FieldTxHash] = txHash logtrace.Info(ctx, "register: action finalized", fields) diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go index 374a9389..29b047bd 100644 --- a/supernode/cascade/service.go +++ b/supernode/cascade/service.go @@ -11,12 +11,11 @@ import ( ) type CascadeService struct { - config *Config - - LumeraClient adaptors.LumeraClient - P2P adaptors.P2PService - RQ adaptors.CodecService - P2PClient p2p.Client + LumeraClient adaptors.LumeraClient + P2P adaptors.P2PService + RQ adaptors.CodecService + P2PClient p2p.Client + SupernodeAccountAddress string } // Compile-time checks to ensure CascadeService implements required interfaces @@ -32,12 +31,12 @@ func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { func (service *CascadeService) Run(ctx context.Context) error { <-ctx.Done(); return nil } // NewCascadeService returns a new CascadeService instance -func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { +func NewCascadeService(supernodeAccountAddress string, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { return &CascadeService{ - config: config, - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - P2PClient: p2pClient, + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + P2PClient: p2pClient, + SupernodeAccountAddress: supernodeAccountAddress, } } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index b96bc7d5..44722f24 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -115,7 +115,7 @@ The supernode will connect to the Lumera network and begin participating in the // Configure cascade service cService := cascadeService.NewCascadeService( - &cascadeService.Config{SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, RqFilesDir: appConfig.GetRaptorQFilesDir()}, + appConfig.SupernodeConfig.Identity, lumeraClient, p2pService, codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 2db7ad09..b9af06d2 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -297,7 +297,7 @@ func TestCascadeE2E(t *testing.T) { t.Logf("Requesting cascade action with metadata: %s", metadata) t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, autoPrice, expirationTime) - response, err := 
lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime)
+	response, _ := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime)
 
 	txresp := response.TxResponse
 
@@ -356,26 +356,38 @@ func TestCascadeE2E(t *testing.T) {
 	// Step 9: Subscribe to all events and extract tx hash
 	// ---------------------------------------
 
-	// Channel to receive the transaction hash
-	txHashCh := make(chan string, 1)
-	completionCh := make(chan bool, 1)
-
-	// Subscribe to ALL events
-	err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) {
-		// Only capture TxhasReceived events
-		if e.Type == event.SDKTaskTxHashReceived {
-			if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" {
-				// Send the hash to our channel
-				txHashCh <- txHash
-			}
-		}
-
-		// Also monitor for task completion
-		if e.Type == event.SDKTaskCompleted {
-			completionCh <- true
-		}
-	})
-	require.NoError(t, err, "Failed to subscribe to events")
+	// Channels to receive async signals
+	txHashCh := make(chan string, 1)
+	completionCh := make(chan bool, 1)
+	errCh := make(chan string, 1)
+
+	// Subscribe to ALL events (non-blocking sends to avoid handler stalls)
+	err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) {
+		// Log every event for debugging and capture key ones
+		t.Logf("SDK event: type=%s data=%v", e.Type, e.Data)
+		// Only capture TxHashReceived events
+		if e.Type == event.SDKTaskTxHashReceived {
+			if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" {
+				// Non-blocking send; drop if buffer full
+				select { case txHashCh <- txHash: default: }
+			}
+		}
+
+		// Also monitor for task completion
+		if e.Type == event.SDKTaskCompleted {
+			// Non-blocking send; drop if buffer full
+			select { case completionCh <- true: default: }
+		}
+		// Capture task failures and propagate error message to main goroutine
+		if e.Type == event.SDKTaskFailed {
+			if msg, ok := e.Data[event.KeyError].(string); ok && msg != "" {
+				select { case errCh <- msg: default: }
+			} else {
+				select { case errCh <- "task failed (no error message)": default: }
+			}
+		}
+	})
+	require.NoError(t, err, "Failed to subscribe to events")
 
 
 	// Start cascade operation
 
@@ -390,8 +402,26 @@ func TestCascadeE2E(t *testing.T) {
 	require.NoError(t, err, "Failed to start cascade operation")
 	t.Logf("Cascade operation started with task ID: %s", taskID)
 
-	recievedhash := <-txHashCh
-	<-completionCh
+	// Wait for both tx-hash and completion with a timeout
+	var recievedhash string
+	done := false
+	timeout := time.After(2 * time.Minute)
+waitLoop:
+	for {
+		if recievedhash != "" && done {
+			break waitLoop
+		}
+		select {
+		case h := <-txHashCh:
+			if recievedhash == "" { recievedhash = h }
+		case <-completionCh:
+			done = true
+		case emsg := <-errCh:
+			t.Fatalf("cascade task reported failure: %s", emsg)
+		case <-timeout:
+			t.Fatalf("timeout waiting for events; recievedhash=%q done=%v", recievedhash, done)
+		}
+	}
 
 	t.Logf("Received transaction hash: %s", recievedhash)
 
diff --git a/tests/system/go.mod b/tests/system/go.mod
index 99bb1df9..052a1b76 100644
--- a/tests/system/go.mod
+++ b/tests/system/go.mod
@@ -104,6 +104,7 @@ require (
 	github.com/gorilla/handlers v1.5.2 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/gsterjov/go-libsecret 
v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -118,7 +119,6 @@ require ( github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -128,8 +128,6 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/highwayhash v1.0.3 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index 5737b819..1ac6ecda 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -427,6 +427,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -505,7 +507,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -579,11 +580,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= From c2372e86cf11fba8f1b2df5541631f90517d8079 Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Mon, 20 Oct 2025 18:16:25 +0500 Subject: [PATCH 30/36] Optionally enforce min supernode version (#206) --- Makefile | 2 + p2p/kademlia/dht.go | 10 +- p2p/kademlia/network.go | 8 +- p2p/kademlia/node.go | 4 +- p2p/kademlia/version_gate.go | 118 +++++++++++++++++++---- sdk/helpers/github_helper.go | 37 -------- sdk/task/task.go | 6 +- supernode/cascade/register.go | 174 +++++++++++++++++----------------- supernode/cmd/start.go | 10 +- supernode/cmd/version.go | 2 + 10 files changed, 208 insertions(+), 163 deletions(-) delete mode 100644 sdk/helpers/github_helper.go diff --git a/Makefile b/Makefile index e7c44da4..ad855f4d 100644 --- a/Makefile +++ b/Makefile @@ -10,9 +10,11 @@ GIT_COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") BUILD_TIME ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S') # Linker flags for version information +# Optional minimum peer version for DHT gating can be provided via MIN_VER env/make var LDFLAGS = -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=$(VERSION) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=$(GIT_COMMIT) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.MinVer=$(MIN_VER) \ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=$(DD_API_KEY) \ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=$(DD_SITE) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 7048a727..a430de0b 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -501,7 +501,7 @@ func (s *DHT) newMessage(messageType int, receiver *Node, data interface{}) *Mes IP: hostIP, ID: s.ht.self.ID, Port: s.ht.self.Port, - Version: requiredVersion(), + Version: localVersion(), } return &Message{ Sender: sender, @@ -1399,21 +1399,21 @@ func (s *DHT) sendStoreData(ctx context.Context, n *Node, request *StoreDataRequ // add a node into the appropriate k bucket, return the removed node if it's full func (s *DHT) addNode(ctx context.Context, node *Node) *Node { - // Strict version gating: must match env and be non-empty. + // Minimum-version gating: reject nodes below configured minimum. 
peerVer := "" if node != nil { peerVer = node.Version } - if required, mismatch := versionMismatch(peerVer); mismatch { + if minRequired, tooOld := versionTooOld(peerVer); tooOld { fields := logtrace.Fields{ logtrace.FieldModule: "p2p", - "required": required, + "min_required": minRequired, "peer_version": strings.TrimSpace(peerVer), } if node != nil { fields["peer"] = node.String() } - logtrace.Debug(ctx, "Rejecting node due to version mismatch", fields) + logtrace.Debug(ctx, "Rejecting node: peer below minimum version", fields) return nil } // Allow localhost for integration testing diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index cb1ff928..a5ae39ee 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -415,15 +415,15 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { } } - // Strict version gating: reject immediately on mismatch or missing + // Minimum-version gating: reject immediately if peer is below configured minimum var senderVer string if request != nil && request.Sender != nil { senderVer = request.Sender.Version } - if required, mismatch := versionMismatch(senderVer); mismatch { - logtrace.Debug(ctx, "Rejecting connection due to version mismatch", logtrace.Fields{ + if minRequired, tooOld := versionTooOld(senderVer); tooOld { + logtrace.Debug(ctx, "Rejecting connection: peer below minimum version", logtrace.Fields{ logtrace.FieldModule: "p2p", - "required": required, + "min_required": minRequired, "peer_version": strings.TrimSpace(senderVer), }) return diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index ed37d4be..0011c8be 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -23,8 +23,8 @@ type Node struct { // port of the node Port uint16 `json:"port,omitempty"` - // Version of the supernode binary (used for strict DHT gating) - Version string `json:"version,omitempty"` + // Version of the supernode binary (advertised to peers; may be used by min-version gating) + Version string `json:"version,omitempty"` HashedID []byte } diff --git a/p2p/kademlia/version_gate.go b/p2p/kademlia/version_gate.go index 74c7dc77..d2d1a755 100644 --- a/p2p/kademlia/version_gate.go +++ b/p2p/kademlia/version_gate.go @@ -1,34 +1,112 @@ package kademlia import ( - "os" + "strconv" "strings" ) -var requiredVer string +// localVer is the advertised version of this binary (e.g., v1.2.3), +// injected by the caller (supernode/cmd) at startup. +var localVer string -// SetRequiredVersion sets the version that peers must match to be accepted. -func SetRequiredVersion(v string) { - requiredVer = strings.TrimSpace(v) +// minVer is the optional minimum peer version to accept. If empty, gating is disabled. +var minVer string + +// SetLocalVersion sets the version this node advertises to peers. +func SetLocalVersion(v string) { + localVer = strings.TrimSpace(v) } -// requiredVersion returns the configured required version (build-time injected by caller). -func requiredVersion() string { - return requiredVer +// SetMinVersion sets the optional minimum required peer version for DHT interactions. +// When empty, version gating is disabled and all peers are accepted regardless of version string. +func SetMinVersion(v string) { + minVer = strings.TrimSpace(v) } -// versionMismatch determines if the given peer version is unacceptable. -// Policy: required and peer must both be non-empty and exactly equal. 
-func versionMismatch(peerVersion string) (required string, mismatch bool) { - required = requiredVersion() - // Bypass strict gating during integration tests. - // Tests set os.Setenv("INTEGRATION_TEST", "true"). - if os.Getenv("INTEGRATION_TEST") == "true" { - return required, false +// localVersion returns the configured advertised version. +func localVersion() string { return localVer } + +// minimumVersion returns the configured minimum acceptable version; empty disables gating. +func minimumVersion() string { return minVer } + +// versionTooOld reports whether the peerVersion is below the configured minimum version. +// If no minimum is configured, gating is disabled and this returns ("", false). +func versionTooOld(peerVersion string) (minRequired string, tooOld bool) { + minRequired = minimumVersion() + if strings.TrimSpace(minRequired) == "" { + // Gating disabled + return "", false + } + + // Normalize inputs (strip leading 'v' and pre-release/build metadata) + p, okP := parseSemver(peerVersion) + m, okM := parseSemver(minRequired) + if !okM { + // Misconfigured minimum; disable gating to avoid accidental network splits. + return "", false + } + if !okP { + // Peer did not provide a valid version; treat as too old under a min-version policy. + return minRequired, true + } + // Compare peer >= min + if p[0] < m[0] { + return minRequired, true + } + if p[0] > m[0] { + return minRequired, false + } + if p[1] < m[1] { + return minRequired, true + } + if p[1] > m[1] { + return minRequired, false + } + if p[2] < m[2] { + return minRequired, true + } + return minRequired, false +} + +// parseSemver parses versions like "v1.2.3", "1.2.3-alpha" into [major, minor, patch]. +// Returns ok=false if no numeric major part is found. +func parseSemver(v string) ([3]int, bool) { + var out [3]int + s := strings.TrimSpace(v) + if s == "" { + return out, false + } + if s[0] == 'v' || s[0] == 'V' { + s = s[1:] + } + // Drop pre-release/build metadata + if i := strings.IndexAny(s, "-+"); i >= 0 { + s = s[:i] + } + parts := strings.Split(s, ".") + if len(parts) == 0 { + return out, false } - peer := strings.TrimSpace(peerVersion) - if required == "" || peer == "" || peer != required { - return required, true + // Parse up to 3 numeric parts; missing parts default to 0 + for i := 0; i < len(parts) && i < 3; i++ { + numStr := parts[i] + // Trim non-digit suffixes (e.g., "1rc1" -> "1") + j := 0 + for j < len(numStr) && numStr[j] >= '0' && numStr[j] <= '9' { + j++ + } + if j == 0 { + // No leading digits + if i == 0 { + return out, false + } + break + } + n, err := strconv.Atoi(numStr[:j]) + if err != nil { + return out, false + } + out[i] = n } - return required, false + return out, true } diff --git a/sdk/helpers/github_helper.go b/sdk/helpers/github_helper.go deleted file mode 100644 index 0c028c55..00000000 --- a/sdk/helpers/github_helper.go +++ /dev/null @@ -1,37 +0,0 @@ -package helpers - -import ( - "os" - "strings" - "sync" - - gh "github.com/LumeraProtocol/supernode/v2/pkg/github" -) - -var ( - requiredSupernodeVersion string - requiredVersionOnce sync.Once -) - -// ResolveRequiredSupernodeVersion returns the latest stable SuperNode tag from GitHub. -// The value is fetched once per process and cached. If lookup fails, it returns -// an empty string so callers can gracefully skip strict version gating. -func ResolveRequiredSupernodeVersion() string { - // Bypass strict version gating during integration tests. 
- if os.Getenv("INTEGRATION_TEST") == "true" { - return "" - } - requiredVersionOnce.Do(func() { - client := gh.NewClient("LumeraProtocol/supernode") - if client != nil { - if release, err := client.GetLatestStableRelease(); err == nil { - if tag := strings.TrimSpace(release.TagName); tag != "" { - requiredSupernodeVersion = tag - return - } - } - } - requiredSupernodeVersion = "" - }) - return requiredSupernodeVersion -} diff --git a/sdk/task/task.go b/sdk/task/task.go index 7dd72e8f..1779a93a 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -13,7 +13,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" - "github.com/LumeraProtocol/supernode/v2/sdk/helpers" "github.com/LumeraProtocol/supernode/v2/sdk/log" "github.com/LumeraProtocol/supernode/v2/sdk/net" "google.golang.org/grpc/health/grpc_health_v1" @@ -183,10 +182,7 @@ func (t *BaseTask) fetchSupernodesWithLoads(ctx context.Context, height int64) ( t.logger.Info(cctx, "reject supernode: status fetch failed", "error", err) return nil } - if reqVer := helpers.ResolveRequiredSupernodeVersion(); reqVer != "" && status.Version != reqVer { - t.logger.Info(cctx, "reject supernode: version mismatch", "expected", reqVer, "got", status.Version) - return nil - } + // Removed SDK-level version gating; rely on network/node policies instead. // Compute load from running tasks (sum of task_count across services) total := 0 diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go index a9b44117..926f9b31 100644 --- a/supernode/cascade/register.go +++ b/supernode/cascade/register.go @@ -25,97 +25,97 @@ type RegisterResponse struct { } func (task *CascadeRegistrationTask) Register( - ctx context.Context, - req *RegisterRequest, - send func(resp *RegisterResponse) error, + ctx context.Context, + req *RegisterRequest, + send func(resp *RegisterResponse) error, ) (err error) { - // Step 1: Correlate context and capture task identity - if req != nil && req.ActionID != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) - ctx = logtrace.CtxWithOrigin(ctx, "first_pass") - task.taskID = req.TaskID - } - - // Step 2: Log request and ensure uploaded file cleanup - fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "register: request", fields) - defer func() { - if req != nil && req.FilePath != "" { - if remErr := os.RemoveAll(req.FilePath); remErr != nil { - logtrace.Warn(ctx, "Failed to remove uploaded file", fields) - } else { - logtrace.Debug(ctx, "Uploaded file cleaned up", fields) - } - } - }() - - // Step 3: Fetch the action details - action, err := task.fetchAction(ctx, req.ActionID, fields) - if err != nil { - return err - } - fields[logtrace.FieldBlockHeight] = action.BlockHeight + // Step 1: Correlate context and capture task identity + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID + } + + // Step 2: Log request and ensure uploaded file cleanup + fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} + logtrace.Info(ctx, "register: request", fields) + defer func() { + if req != nil && req.FilePath != "" { + if remErr := os.RemoveAll(req.FilePath); remErr != nil { + logtrace.Warn(ctx, "Failed to remove uploaded file", fields) + } else { + logtrace.Debug(ctx, "Uploaded file cleaned up", 
fields) + } + } + }() + + // Step 3: Fetch the action details + action, err := task.fetchAction(ctx, req.ActionID, fields) + if err != nil { + return err + } + fields[logtrace.FieldBlockHeight] = action.BlockHeight fields[logtrace.FieldCreator] = action.Creator fields[logtrace.FieldStatus] = action.State fields[logtrace.FieldPrice] = action.Price logtrace.Info(ctx, "register: action fetched", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - // Step 4: Verify action fee based on data size (rounded up to KB) - if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { - return err - } + // Step 4: Verify action fee based on data size (rounded up to KB) + if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { + return err + } logtrace.Info(ctx, "register: fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - // Step 5: Ensure this node is eligible (top supernode for block) - fields[logtrace.FieldSupernodeState] = task.SupernodeAccountAddress - if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { - return err - } + // Step 5: Ensure this node is eligible (top supernode for block) + fields[logtrace.FieldSupernodeState] = task.SupernodeAccountAddress + if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { + return err + } logtrace.Info(ctx, "register: top supernode confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - // Step 6: Decode Cascade metadata from the action - cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) - if err != nil { - return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) - } + // Step 6: Decode Cascade metadata from the action + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) + if err != nil { + return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) + } logtrace.Info(ctx, "register: metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - // Step 7: Verify request-provided data hash matches metadata - if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { - return err - } + // Step 7: Verify request-provided data hash matches metadata + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { + return err + } logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - // Step 8: Encode input using the RQ codec to produce layout and symbols - encodeResult, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) - if err != nil { - return err - } + // Step 8: Encode input using the RQ codec to produce layout and symbols + encodeResult, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) + if err != nil { + return err + } fields["symbols_dir"] = encodeResult.SymbolsDir logtrace.Info(ctx, "register: input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - // Step 9: Verify index and layout signatures; produce layoutB64 - logtrace.Info(ctx, "register: verify+decode layout start", fields) - indexFile, layoutB64, vErr := 
task.validateIndexAndLayout(ctx, action.Creator, cascadeMeta.Signatures, encodeResult.Layout) - if vErr != nil { - return task.wrapErr(ctx, "signature or index validation failed", vErr, fields) - } - layoutSignatureB64 := indexFile.LayoutSignature - logtrace.Info(ctx, "register: signature verified", fields) - task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - - // Step 10: Generate RQID files (layout and index) and compute IDs - rqIDs, idFiles, err := task.generateRQIDFiles(ctx, cascadeMeta, layoutSignatureB64, layoutB64, fields) - if err != nil { - return err - } + // Step 9: Verify index and layout signatures; produce layoutB64 + logtrace.Info(ctx, "register: verify+decode layout start", fields) + indexFile, layoutB64, vErr := task.validateIndexAndLayout(ctx, action.Creator, cascadeMeta.Signatures, encodeResult.Layout) + if vErr != nil { + return task.wrapErr(ctx, "signature or index validation failed", vErr, fields) + } + layoutSignatureB64 := indexFile.LayoutSignature + logtrace.Info(ctx, "register: signature verified", fields) + task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) + + // Step 10: Generate RQID files (layout and index) and compute IDs + rqIDs, idFiles, err := task.generateRQIDFiles(ctx, cascadeMeta, layoutSignatureB64, layoutB64, fields) + if err != nil { + return err + } // Calculate combined size of all index and layout files totalSize := 0 @@ -134,29 +134,29 @@ func (task *CascadeRegistrationTask) Register( logtrace.Info(ctx, "register: rqids validated", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - // Step 11: Simulate finalize to ensure the tx will succeed - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize simulation failed", fields) - task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } + // Step 11: Simulate finalize to ensure the tx will succeed + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize simulation failed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } logtrace.Info(ctx, "register: finalize simulation passed", fields) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - // Step 12: Store artefacts to the network store - if err := task.storeArtefacts(ctx, action.ActionID, idFiles, encodeResult.SymbolsDir, fields); err != nil { - return err - } - task.emitArtefactsStored(ctx, fields, encodeResult.Layout, send) - - // Step 13: Finalize the action on-chain - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqIDs) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "register: finalize action error", fields) - return task.wrapErr(ctx, "failed to finalize action", err, fields) - } + // Step 12: Store artefacts to the network store + if err := task.storeArtefacts(ctx, action.ActionID, idFiles, encodeResult.SymbolsDir, fields); err != nil { + return err + } + task.emitArtefactsStored(ctx, fields, encodeResult.Layout, send) + + // 
Step 13: Finalize the action on-chain + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqIDs) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize action error", fields) + return task.wrapErr(ctx, "failed to finalize action", err, fields) + } txHash := resp.TxResponse.TxHash fields[logtrace.FieldTxHash] = txHash logtrace.Info(ctx, "register: action finalized", fields) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 44722f24..f2d81467 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -34,7 +34,7 @@ import ( pbsupernode "github.com/LumeraProtocol/supernode/v2/gen/supernode" - // Configure DHT version gating from build-injected Version + // Configure DHT advertised/minimum versions from build-time variables "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" ) @@ -48,8 +48,12 @@ The supernode will connect to the Lumera network and begin participating in the // Initialize logging logtrace.Setup("supernode") - // Set strict DHT required version from build-time injected variable - kademlia.SetRequiredVersion(Version) + // Advertise our binary version to peers + kademlia.SetLocalVersion(Version) + // Optionally enforce a minimum peer version if provided at build time + if strings.TrimSpace(MinVer) != "" { + kademlia.SetMinVersion(MinVer) + } // Create context with correlation ID for tracing ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") diff --git a/supernode/cmd/version.go b/supernode/cmd/version.go index e6d085d8..9daaabc8 100644 --- a/supernode/cmd/version.go +++ b/supernode/cmd/version.go @@ -11,6 +11,8 @@ var ( Version = "dev" GitCommit = "unknown" BuildTime = "unknown" + // Optional: minimum peer version for DHT gating (empty disables gating) + MinVer = "" ) // versionCmd represents the version command From c5b142a28f429f13485b789b1006c1a1176066c5 Mon Sep 17 00:00:00 2001 From: J Bilal rafique Date: Wed, 22 Oct 2025 17:04:51 +0500 Subject: [PATCH 31/36] Optimize supernode symbol fetch --- p2p/kademlia/dht.go | 285 ++++++++++++++++++++----------------------- p2p/kademlia/node.go | 14 +-- 2 files changed, 140 insertions(+), 159 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index a430de0b..acc4193c 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -17,6 +17,7 @@ import ( "github.com/btcsuite/btcutil/base58" "github.com/cenkalti/backoff/v4" "github.com/cosmos/cosmos-sdk/crypto/keyring" + "golang.org/x/sync/semaphore" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" "github.com/LumeraProtocol/supernode/v2/pkg/errors" @@ -688,11 +689,7 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, var foundLocalCount int32 hexKeys := make([]string, len(keys)) - globalClosestContacts := make(map[string]*NodeList) hashes := make([][]byte, len(keys)) - knownNodes := make(map[string]*Node) - var knownMu sync.Mutex - var closestMu sync.RWMutex defer func() { resMap.Range(func(key, value interface{}) bool { @@ -716,15 +713,6 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, } }() - for _, key := range keys { - result[key] = nil - } - - supernodeAddr, _ := s.getSupernodeAddress(ctx) - hostIP := parseSupernodeAddress(supernodeAddr) - self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} - self.SetHashedID() - for i, key := range keys { decoded := base58.Decode(key) if len(decoded) != B/8 { @@ -732,16 +720,60 @@ func (s *DHT) BatchRetrieve(ctx context.Context, 
keys []string, required int32, } hashes[i] = decoded hexKeys[i] = hex.EncodeToString(decoded) + result[key] = nil } + foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required) + if err != nil { + return nil, fmt.Errorf("fetch and add local keys: %v", err) + } + // Found locally count is logged via summary below; no external metrics + + if foundLocalCount >= required { + logtrace.Debug(ctx, "DHT BatchRetrieve satisfied from local storage", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, nil + } + + if len(localOnly) > 0 && localOnly[0] { + logtrace.Debug(ctx, "DHT BatchRetrieve local-only mode, insufficient keys", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, fmt.Errorf("local-only: found %d, required %d", foundLocalCount, required) + } + + supernodeAddr, addrErr := s.getSupernodeAddress(ctx) + if addrErr != nil { + logtrace.Warn(ctx, "Failed to get supernode address", logtrace.Fields{ + logtrace.FieldModule: "dht", + logtrace.FieldError: addrErr.Error(), + }) + } + hostIP := parseSupernodeAddress(supernodeAddr) + self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} + self.SetHashedID() + + knownNodes := make(map[string]*Node) + var knownMu sync.Mutex + for _, n := range s.ht.nodes() { nn := &Node{ID: n.ID, IP: n.IP, Port: n.Port} nn.SetHashedID() knownNodes[string(nn.ID)] = nn } + ignoreList := s.ignorelist.ToNodeList() + + globalClosestContacts := make(map[string]*NodeList) + var closestMu sync.RWMutex + for i := range keys { - top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], s.ignorelist.ToNodeList(), nil) + if _, found := resMap.Load(hexKeys[i]); found { + continue + } + + top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], ignoreList, nil) closestMu.Lock() globalClosestContacts[keys[i]] = top6 closestMu.Unlock() @@ -750,21 +782,12 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, delete(knownNodes, string(self.ID)) - foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required) - if err != nil { - return nil, fmt.Errorf("fetch and add local keys: %v", err) - } - // Found locally count is logged via summary below; no external metrics - if foundLocalCount >= required { - return result, nil - } - batchSize := batchRetrieveSize var networkFound int32 totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) + sem := semaphore.NewWeighted(int64(parallelBatches)) var wg sync.WaitGroup gctx, cancel := context.WithCancel(ctx) defer cancel() @@ -777,27 +800,39 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if end > len(keys) { end = len(keys) } + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { break } wg.Add(1) - semaphore <- struct{}{} - go s.processBatch( - gctx, - keys[start:end], - hexKeys[start:end], - semaphore, &wg, - globalClosestContacts, - &closestMu, - knownNodes, &knownMu, - &resMap, - required, - foundLocalCount, - &networkFound, - cancel, - txID, - ) + go func(start, end int) { + defer wg.Done() + + if err := sem.Acquire(gctx, 1); err != nil { + return + } + defer sem.Release(1) + + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { + return + } + + s.processBatch( + gctx, + keys[start:end], + 
hexKeys[start:end], + globalClosestContacts, + &closestMu, + knownNodes, &knownMu, + &resMap, + required, + foundLocalCount, + &networkFound, + cancel, + txID, + ) + }(start, end) } wg.Wait() @@ -821,8 +856,6 @@ func (s *DHT) processBatch( ctx context.Context, batchKeys []string, batchHexKeys []string, - semaphore chan struct{}, - wg *sync.WaitGroup, globalClosestContacts map[string]*NodeList, closestMu *sync.RWMutex, knownNodes map[string]*Node, @@ -834,94 +867,65 @@ func (s *DHT) processBatch( cancel context.CancelFunc, txID string, ) { - defer wg.Done() - defer func() { <-semaphore }() + select { + case <-ctx.Done(): + return + default: + } - for i := 0; i < maxIterations; i++ { - select { - case <-ctx.Done(): - return - default: - } + fetchMap := make(map[string][]int) - // Build fetch map (read globalClosestContacts under RLock) - fetchMap := make(map[string][]int) - for i, key := range batchKeys { - closestMu.RLock() - nl := globalClosestContacts[key] - closestMu.RUnlock() - if nl == nil { - continue - } - for _, node := range nl.Nodes { - nodeID := string(node.ID) - fetchMap[nodeID] = append(fetchMap[nodeID], i) - } - } + closestMu.RLock() + localContacts := make(map[string]*NodeList, len(batchKeys)) + for _, key := range batchKeys { + localContacts[key] = globalClosestContacts[key] + } + closestMu.RUnlock() - foundCount, newClosestContacts, batchErr := s.iterateBatchGetValues( - ctx, knownNodes, batchKeys, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), - ) - if batchErr != nil { - logtrace.Error(ctx, "Iterate batch get values failed", logtrace.Fields{ - logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), - }) + for idx, key := range batchKeys { + nl := localContacts[key] + if nl == nil { + continue } - - atomic.AddInt32(networkFound, int32(foundCount)) - if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { - cancel() - break + for _, node := range nl.Nodes { + nodeID := string(node.ID) + fetchMap[nodeID] = append(fetchMap[nodeID], idx) } + } - changed := false - for key, nodesList := range newClosestContacts { - if nodesList == nil || nodesList.Nodes == nil { - continue - } - - closestMu.RLock() - curr := globalClosestContacts[key] - closestMu.RUnlock() - if curr == nil || curr.Nodes == nil { - logtrace.Warn(ctx, "Global contacts missing key during merge", logtrace.Fields{"key": key}) - continue - } - - if !haveAllNodes(nodesList.Nodes, curr.Nodes) { - changed = true - } - - nodesList.AddNodes(curr.Nodes) - nodesList.Sort() - nodesList.TopN(Alpha) - - s.addKnownNodesSafe(ctx, nodesList.Nodes, knownNodes, knownMu) - - closestMu.Lock() - globalClosestContacts[key] = nodesList - closestMu.Unlock() - } + foundCount, batchErr := s.iterateBatchGetValues( + ctx, knownNodes, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), + ) + if batchErr != nil { + logtrace.Error(ctx, "Iterate batch get values failed", logtrace.Fields{ + logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), + }) + } - if !changed { - break - } + atomic.AddInt32(networkFound, int32(foundCount)) + if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { + cancel() } } -func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, keys []string, hexKeys []string, fetchMap map[string][]int, - resMap *sync.Map, req, alreadyFound int32) (int, map[string]*NodeList, error) { - semaphore := make(chan struct{}, 
storeSameSymbolsBatchConcurrency) // Limit concurrency to 1 - closestContacts := make(map[string]*NodeList) +func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, hexKeys []string, fetchMap map[string][]int, + resMap *sync.Map, req, alreadyFound int32) (int, error) { + sem := semaphore.NewWeighted(int64(storeSameSymbolsBatchConcurrency)) var wg sync.WaitGroup - contactsMap := make(map[string]map[string][]*Node) var firstErr error var mu sync.Mutex // To protect the firstErr foundCount := int32(0) gctx, cancel := context.WithCancel(ctx) // Create a cancellable context defer cancel() - for nodeID, node := range nodes { + + for nodeID := range fetchMap { + node, ok := nodes[nodeID] + if !ok { + continue + } + + if s.ignorelist.Banned(node) { logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ logtrace.FieldModule: "dht", @@ -930,18 +934,17 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, continue } - contactsMap[nodeID] = make(map[string][]*Node) wg.Add(1) go func(node *Node, nodeID string) { defer wg.Done() - select { - case <-ctx.Done(): + if err := sem.Acquire(gctx, 1); err != nil { return - case <-gctx.Done(): + } + defer sem.Release(1) + + if atomic.LoadInt32(&foundCount) >= int32(req-alreadyFound) { return - case semaphore <- struct{}{}: - defer func() { <-semaphore }() } indices := fetchMap[nodeID] @@ -985,8 +988,6 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, break } } - } else { - contactsMap[nodeID][k] = v.Closest } } @@ -1009,33 +1010,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, }) } - for _, closestNodes := range contactsMap { - for key, nodes := range closestNodes { - comparator, err := hex.DecodeString(key) - if err != nil { - logtrace.Error(ctx, "Failed to decode hex key in closestNodes.Range", logtrace.Fields{ - logtrace.FieldModule: "dht", - "key": key, - logtrace.FieldError: err.Error(), - }) - return 0, nil, err - } - bkey := base58.Encode(comparator) - - if _, ok := closestContacts[bkey]; !ok { - closestContacts[bkey] = &NodeList{Nodes: nodes, Comparator: comparator} - } else { - closestContacts[bkey].AddNodes(nodes) - } - } - } - - for key, nodes := range closestContacts { - nodes.Sort() - nodes.TopN(Alpha) - closestContacts[key] = nodes - } - return int(foundCount), closestContacts, firstErr + return int(foundCount), firstErr } func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { @@ -1413,17 +1388,25 @@ func (s *DHT) addNode(ctx context.Context, node *Node) *Node { if node != nil { fields["peer"] = node.String() } - logtrace.Debug(ctx, "Rejecting node: peer below minimum version", fields) + logtrace.Info(ctx, "Rejecting node: peer below minimum version", fields) return nil } // Allow localhost for integration testing isIntegrationTest := os.Getenv("INTEGRATION_TEST") == "true" if node.IP == "" || node.IP == "0.0.0.0" || (!isIntegrationTest && node.IP == "127.0.0.1") { - logtrace.Debug(ctx, "Trying to add invalid node", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "Rejecting node: invalid IP", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "ip": node.IP, + "node": node.String(), + "integration_test": isIntegrationTest, + }) return nil } if bytes.Equal(node.ID, s.ht.self.ID) { - logtrace.Debug(ctx, "Trying to add itself", logtrace.Fields{logtrace.FieldModule: "p2p"}) + 
logtrace.Info(ctx, "Rejecting node: is self", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "node": node.String(), + }) return nil } node.SetHashedID() diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index 0011c8be..8a3e14f1 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -183,15 +183,13 @@ func (s *NodeList) DelNode(node *Node) { } func haveAllNodes(a, b []*Node) bool { + bSet := make(map[string]bool, len(b)) + for _, y := range b { + bSet[string(y.HashedID)] = true + } + for _, x := range a { - found := false - for _, y := range b { - if bytes.Equal(x.HashedID, y.HashedID) { - found = true - break - } - } - if !found { + if !bSet[string(x.HashedID)] { return false } } From e3225762e5cfff60272fe062b3ccf3314815a6d4 Mon Sep 17 00:00:00 2001 From: J Bilal rafique Date: Wed, 22 Oct 2025 17:06:53 +0500 Subject: [PATCH 32/36] log fix --- p2p/kademlia/dht.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index acc4193c..bf7a7fd0 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -925,7 +925,6 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, continue } - if s.ignorelist.Banned(node) { logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ logtrace.FieldModule: "dht", @@ -1388,7 +1387,7 @@ func (s *DHT) addNode(ctx context.Context, node *Node) *Node { if node != nil { fields["peer"] = node.String() } - logtrace.Info(ctx, "Rejecting node: peer below minimum version", fields) + logtrace.Debug(ctx, "Rejecting node: peer below minimum version", fields) return nil } // Allow localhost for integration testing From 5789526b5247debc34ab79ee8932eeedd733a209 Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Wed, 22 Oct 2025 18:06:21 +0500 Subject: [PATCH 33/36] Rank supernodes by free ram (#207) --- p2p/kademlia/node.go | 4 +- sdk/task/cascade.go | 17 ++- sdk/task/download.go | 14 +- sdk/task/task.go | 317 +++++++++++++------------------------------ 4 files changed, 114 insertions(+), 238 deletions(-) diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index 0011c8be..23ec7b8f 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -23,8 +23,8 @@ type Node struct { // port of the node Port uint16 `json:"port,omitempty"` - // Version of the supernode binary (advertised to peers; may be used by min-version gating) - Version string `json:"version,omitempty"` + // Version of the supernode binary (advertised to peers; may be used by min-version gating) + Version string `json:"version,omitempty"` HashedID []byte } diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index 68184ea7..c10614d3 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -37,8 +37,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - // 1 - Fetch the supernodes (single-pass probe: sanitize + load snapshot) - supernodes, loads, err := t.fetchSupernodesWithLoads(ctx, t.Action.Height) + // 1 - Fetch the supernodes + supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "Supernodes unavailable", event.EventData{event.KeyError: err.Error()}) @@ -46,8 +46,11 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - // Rank by current load snapshot (fewest first), tie-break deterministically - supernodes = t.orderByLoadSnapshotThenDeterministic(supernodes, loads) + // Initial concurrent balance 
filter (one-time) + supernodes = t.filterByMinBalance(ctx, supernodes) + + // Rank by available free RAM (descending). Unknown RAM stays after known. + supernodes = t.orderByFreeRAM(ctx, supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes @@ -76,11 +79,11 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum var lastErr error attempted := 0 - // Work on a copy and re-rank between attempts to avoid stale ordering + // Work on a copy; re-rank by free RAM between attempts remaining := append(lumera.Supernodes(nil), supernodes...) for len(remaining) > 0 { - // Refresh load-aware ordering for remaining candidates - remaining = t.orderByLoadThenDeterministic(ctx, remaining) + // Re-rank remaining nodes by available RAM (descending) + remaining = t.orderByFreeRAM(ctx, remaining) sn := remaining[0] iteration := attempted + 1 diff --git a/sdk/task/download.go b/sdk/task/download.go index eb9ad8eb..ed9d98ef 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -36,15 +36,18 @@ func NewCascadeDownloadTask(base BaseTask, actionId string, outputPath string, s func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskStarted, "Running cascade download task", nil) - // 1 – fetch super-nodes (single-pass probe: sanitize + load snapshot) - supernodes, loads, err := t.fetchSupernodesWithLoads(ctx, t.Action.Height) + // 1 – fetch super-nodes (plain) + supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "super-nodes unavailable", event.EventData{event.KeyError: err.Error()}) t.LogEvent(ctx, event.SDKTaskFailed, "task failed", event.EventData{event.KeyError: err.Error()}) return err } - // Rank by current load snapshot (fewest first), tie-break deterministically - supernodes = t.orderByLoadSnapshotThenDeterministic(supernodes, loads) + // Initial concurrent balance filter (one-time) + supernodes = t.filterByMinBalance(ctx, supernodes) + + // Rank by available free RAM (descending). Unknown RAM stays after known. + supernodes = t.orderByFreeRAM(ctx, supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) // 2 – download from super-nodes @@ -83,7 +86,8 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern remaining := append(lumera.Supernodes(nil), supernodes...) attempted := 0 for len(remaining) > 0 { - remaining = t.orderByLoadThenDeterministic(ctx, remaining) + // Re-rank remaining nodes by available RAM (descending) + remaining = t.orderByFreeRAM(ctx, remaining) sn := remaining[0] iteration := attempted + 1 diff --git a/sdk/task/task.go b/sdk/task/task.go index 1779a93a..bce7cf1b 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -8,7 +8,6 @@ import ( "sync" sdkmath "cosmossdk.io/math" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" @@ -77,8 +76,6 @@ func (t *BaseTask) LogEvent(ctx context.Context, evt event.EventType, msg string t.emitEvent(ctx, evt, additionalInfo) } -// (removed) fetchSupernodes: replaced by fetchSupernodesWithLoads single-pass probe - // isServing pings the super-node once with a short timeout. 
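For reference, the just-in-time serving check described above is essentially a standard gRPC health probe executed under a short deadline. A minimal, self-contained sketch of that pattern (the endpoint, timeout value, and plaintext transport are illustrative assumptions; the real client goes through the SDK's secure client factory):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

// probeServing reports whether the gRPC server at addr answers the standard
// health-check RPC with SERVING before the timeout expires.
func probeServing(parent context.Context, addr string, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()

	// Plaintext transport only to keep the sketch short; a real client would
	// use the secured connection set up by the SDK's client factory.
	conn, err := grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return false
	}
	defer conn.Close()

	resp, err := grpc_health_v1.NewHealthClient(conn).Check(ctx, &grpc_health_v1.HealthCheckRequest{})
	return err == nil && resp.GetStatus() == grpc_health_v1.HealthCheckResponse_SERVING
}

func main() {
	// Address and timeout are placeholders.
	fmt.Println(probeServing(context.Background(), "127.0.0.1:4444", 2*time.Second))
}
```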
func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { ctx, cancel := context.WithTimeout(parent, connectionTimeout) @@ -104,266 +101,95 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { t.logger.Info(ctx, "reject supernode: health not SERVING", "error", err, "status", statusStr) return false } - - // Then check P2P peers count via status - status, err := client.GetSupernodeStatus(ctx) - if err != nil { - t.logger.Info(ctx, "reject supernode: status fetch failed", "error", err) - return false - } - if status.Network.PeersCount <= 1 { - t.logger.Info(ctx, "reject supernode: insufficient peers", "peers_count", status.Network.PeersCount) - return false - } - - denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" - bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) - if err != nil || bal == nil || bal.Balance == nil { - t.logger.Info(ctx, "reject supernode: balance fetch failed or empty", "error", err) - return false - } - // Require at least 1 LUME = 10^6 micro (ulume) - min := sdkmath.NewInt(1_000_000) - if bal.Balance.Amount.LT(min) { - t.logger.Info(ctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String()) - return false - } - return true } -// fetchSupernodesWithLoads performs a single-pass probe that both sanitizes candidates -// and captures their current running task load for initial ranking. -// Returns the healthy supernodes and a map of node-key -> load. -func (t *BaseTask) fetchSupernodesWithLoads(ctx context.Context, height int64) (lumera.Supernodes, map[string]int, error) { +// No health, status, balance or load checks are done here. +func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Supernodes, error) { sns, err := t.client.GetSupernodes(ctx, height) if err != nil { - return nil, nil, fmt.Errorf("fetch supernodes: %w", err) + return nil, fmt.Errorf("fetch supernodes: %w", err) } if len(sns) == 0 { - return nil, nil, errors.New("no supernodes found") + return nil, errors.New("no supernodes found") } - - healthy := make(lumera.Supernodes, 0, len(sns)) - loads := make(map[string]int, len(sns)) - mu := sync.Mutex{} - - eg, ctx := errgroup.WithContext(ctx) - for _, sn := range sns { - sn := sn - eg.Go(func() error { - cctx, cancel := context.WithTimeout(ctx, connectionTimeout) - defer cancel() - - client, err := net.NewClientFactory(cctx, t.logger, t.keyring, t.client, net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, - }).CreateClient(cctx, sn) - if err != nil { - t.logger.Info(cctx, "reject supernode: client create failed", "reason", err.Error(), "endpoint", sn.GrpcEndpoint, "cosmos", sn.CosmosAddress) - return nil - } - defer client.Close(cctx) - - // Health - resp, err := client.HealthCheck(cctx) - if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { - statusStr := "nil" - if resp != nil { - statusStr = resp.Status.String() - } - t.logger.Info(cctx, "reject supernode: health not SERVING", "error", err, "status", statusStr) - return nil - } - - // Status (for peers + load) - status, err := client.GetSupernodeStatus(cctx) - if err != nil { - t.logger.Info(cctx, "reject supernode: status fetch failed", "error", err) - return nil - } - // Removed SDK-level version gating; rely on network/node policies instead. 
- - // Compute load from running tasks (sum of task_count across services) - total := 0 - for _, st := range status.GetRunningTasks() { - if st == nil { - continue - } - if c := int(st.GetTaskCount()); c > 0 { - total += c - } else if ids := st.GetTaskIds(); len(ids) > 0 { - total += len(ids) - } - } - - // Balance - denom := txmod.DefaultFeeDenom - bal, err := t.client.GetBalance(cctx, sn.CosmosAddress, denom) - if err != nil || bal == nil || bal.Balance == nil { - t.logger.Info(cctx, "reject supernode: balance fetch failed or empty", "error", err) - return nil - } - min := sdkmath.NewInt(1_000_000) - if bal.Balance.Amount.LT(min) { - t.logger.Info(cctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String()) - return nil - } - - // Accept - mu.Lock() - healthy = append(healthy, sn) - key := sn.CosmosAddress - if key == "" { - key = sn.GrpcEndpoint - } - loads[key] = total - mu.Unlock() - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, nil, fmt.Errorf("health-check goroutines: %w", err) - } - if len(healthy) == 0 { - return nil, nil, errors.New("no healthy supernodes found") - } - return healthy, loads, nil + return sns, nil } -// orderByLoadSnapshotThenDeterministic sorts using a provided load snapshot; nodes missing -// in the snapshot are considered unknown-load and placed after known-load nodes. -func (t *BaseTask) orderByLoadSnapshotThenDeterministic(sns lumera.Supernodes, loads map[string]int) lumera.Supernodes { +func (t *BaseTask) orderByFreeRAM(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { if len(sns) <= 1 { return sns } - det := orderSupernodesByDeterministicDistance(t.ActionID, append(lumera.Supernodes(nil), sns...)) - idx := make(map[string]int, len(det)) - for i, sn := range det { - key := sn.CosmosAddress - if key == "" { - key = sn.GrpcEndpoint - } - idx[key] = i - } - type scored struct { - sn lumera.Supernode - load int - loadKnown bool - tieIdx int - } - arr := make([]scored, 0, len(sns)) - for _, sn := range sns { - key := sn.CosmosAddress - if key == "" { - key = sn.GrpcEndpoint - } - l, ok := loads[key] - arr = append(arr, scored{sn: sn, load: l, loadKnown: ok, tieIdx: idx[key]}) - } - - sort.Slice(arr, func(i, j int) bool { - ai, aj := arr[i], arr[j] - if ai.loadKnown != aj.loadKnown { - return ai.loadKnown - } - if ai.loadKnown && aj.loadKnown && ai.load != aj.load { - return ai.load < aj.load - } - return ai.tieIdx < aj.tieIdx - }) - - out := make(lumera.Supernodes, len(arr)) - for i := range arr { - out[i] = arr[i].sn + idx int + sn lumera.Supernode + ramGb float64 + known bool } - return out -} -// orderByLoadThenDeterministic ranks supernodes by their current running task count (ascending). -// Ties are broken deterministically using orderSupernodesByDeterministicDistance with ActionID as seed. -func (t *BaseTask) orderByLoadThenDeterministic(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { - if len(sns) <= 1 { - return sns + out := make([]scored, len(sns)) + // Best-effort parallel status fetch; do not filter or fail. + // We intentionally avoid health/peer/balance checks here. 
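The ranking code above follows a common fan-out shape: probe every candidate in parallel under a per-call deadline, collect best-effort results over a buffered channel, and stable-sort so nodes with a known metric come first (higher free RAM first) while failed probes keep their original order. A stripped-down sketch of that shape, with a hypothetical `probe` callback standing in for the status RPC:

```go
package main

import (
	"context"
	"fmt"
	"sort"
	"time"
)

type candidate struct{ Endpoint string }

// rankByMetric probes every candidate in parallel and orders them by the
// probed metric (descending). Candidates whose probe failed keep their
// original relative order after all candidates with a known metric.
func rankByMetric(parent context.Context, cs []candidate,
	probe func(ctx context.Context, c candidate) (float64, error)) []candidate {

	type scored struct {
		idx   int
		val   float64
		known bool
	}
	results := make(chan scored, len(cs)) // buffered: goroutines never block
	for i, c := range cs {
		i, c := i, c
		go func() {
			ctx, cancel := context.WithTimeout(parent, 2*time.Second)
			defer cancel()
			v, err := probe(ctx, c)
			results <- scored{idx: i, val: v, known: err == nil}
		}()
	}

	out := make([]scored, len(cs))
	for range cs {
		r := <-results
		out[r.idx] = r
	}

	sort.SliceStable(out, func(i, j int) bool {
		a, b := out[i], out[j]
		if a.known != b.known {
			return a.known // known metrics sort before failures
		}
		if a.known && a.val != b.val {
			return a.val > b.val // e.g. more free RAM first
		}
		return a.idx < b.idx // stable tie-break: original order
	})

	ranked := make([]candidate, len(out))
	for i, s := range out {
		ranked[i] = cs[s.idx]
	}
	return ranked
}

func main() {
	cs := []candidate{{"a:4444"}, {"bb:4444"}, {"ccc:4444"}}
	fmt.Println(rankByMetric(context.Background(), cs,
		func(_ context.Context, c candidate) (float64, error) {
			return float64(len(c.Endpoint)), nil // placeholder metric
		}))
}
```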
+ for i, sn := range sns { + out[i] = scored{idx: i, sn: sn, ramGb: 0, known: false} } - // Precompute deterministic tie-break order index per node - det := orderSupernodesByDeterministicDistance(t.ActionID, append(lumera.Supernodes(nil), sns...)) - idx := make(map[string]int, len(det)) - for i, sn := range det { - key := sn.CosmosAddress - if key == "" { - key = sn.GrpcEndpoint - } - idx[key] = i + // Query in parallel with a short timeout to avoid blocking too long per node + // Reuse the connectionTimeout constant for symmetry with health probes. + type result struct { + i int + ram float64 + ok bool } - - type scored struct { - sn lumera.Supernode - load int - loadKnown bool - tieIdx int - } - - out := make([]scored, len(sns)) - - // Collect loads in parallel under the same short connection timeout. - eg, ctx := errgroup.WithContext(parent) + ch := make(chan result, len(sns)) for i, sn := range sns { i, sn := i, sn - out[i] = scored{sn: sn, load: 0, loadKnown: false, tieIdx: func() int { - k := sn.CosmosAddress - if k == "" { - k = sn.GrpcEndpoint - } - return idx[k] - }()} - eg.Go(func() error { - cctx, cancel := context.WithTimeout(ctx, connectionTimeout) + go func() { + cctx, cancel := context.WithTimeout(parent, connectionTimeout) defer cancel() client, err := net.NewClientFactory(cctx, t.logger, t.keyring, t.client, net.FactoryConfig{ LocalCosmosAddress: t.config.Account.LocalCosmosAddress, PeerType: t.config.Account.PeerType, }).CreateClient(cctx, sn) if err != nil { - return nil // unknown load; keep candidate + ch <- result{i: i, ram: 0, ok: false} + return } defer client.Close(cctx) + status, err := client.GetSupernodeStatus(cctx) if err != nil || status == nil { - return nil + ch <- result{i: i, ram: 0, ok: false} + return } - // Sum total running tasks across services - total := 0 - for _, st := range status.GetRunningTasks() { - if st == nil { - continue - } - if c := int(st.GetTaskCount()); c > 0 { - total += c - } else if ids := st.GetTaskIds(); len(ids) > 0 { - total += len(ids) - } + res := status.GetResources() + if res == nil || res.GetMemory() == nil { + ch <- result{i: i, ram: 0, ok: false} + return } - out[i].load = total - out[i].loadKnown = true - return nil - }) + ch <- result{i: i, ram: res.GetMemory().GetAvailableGb(), ok: true} + }() + } + // Collect results with a cap bounded by len(sns) + for k := 0; k < len(sns); k++ { + r := <-ch + if r.ok { + out[r.i].ramGb = r.ram + out[r.i].known = true + } } - _ = eg.Wait() // best-effort; unknown loads are placed after known ones below - sort.Slice(out, func(i, j int) bool { + // Known RAM first, then by RAM desc. For ties and unknowns, preserve original order. + sort.SliceStable(out, func(i, j int) bool { ai, aj := out[i], out[j] - if ai.loadKnown != aj.loadKnown { - return ai.loadKnown // known loads first + if ai.known != aj.known { + return ai.known } - if ai.loadKnown && aj.loadKnown && ai.load != aj.load { - return ai.load < aj.load + if ai.known && aj.known && ai.ramGb != aj.ramGb { + return ai.ramGb > aj.ramGb } - // Tie-break deterministically - return ai.tieIdx < aj.tieIdx + return ai.idx < aj.idx }) res := make(lumera.Supernodes, len(out)) @@ -372,3 +198,46 @@ func (t *BaseTask) orderByLoadThenDeterministic(parent context.Context, sns lume } return res } + +// filterByMinBalance filters supernodes by requiring at least a minimum balance +// in the default fee denom. This runs concurrently and is intended to be used +// once during initial discovery only. 
+func (t *BaseTask) filterByMinBalance(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 { + return sns + } + // Require at least 1 LUME = 10^6 ulume by default. + min := sdkmath.NewInt(1_000_000) + denom := txmod.DefaultFeeDenom + + keep := make([]bool, len(sns)) + var wg sync.WaitGroup + wg.Add(len(sns)) + for i, sn := range sns { + i, sn := i, sn + go func() { + defer wg.Done() + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + defer cancel() + bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) + if err != nil || bal == nil || bal.Balance == nil { + t.logger.Info(ctx, "reject supernode: balance fetch failed or empty", "error", err, "address", sn.CosmosAddress) + return + } + if bal.Balance.Amount.LT(min) { + t.logger.Info(ctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String(), "address", sn.CosmosAddress) + return + } + keep[i] = true + }() + } + wg.Wait() + + out := make(lumera.Supernodes, 0, len(sns)) + for i, sn := range sns { + if keep[i] { + out = append(out, sn) + } + } + return out +} From f40a187b4d9ade89ae97af964c2f726d083a77e7 Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Wed, 22 Oct 2025 21:33:37 +0500 Subject: [PATCH 34/36] Relax hard equality check on action fee (#213) --- sdk/action/client.go | 8 +++++--- supernode/cascade/helper.go | 17 ++++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/sdk/action/client.go b/sdk/action/client.go index 147be819..296cba9c 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -298,9 +298,11 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath denom := paramsResp.Params.BaseActionFee.Denom exp := paramsResp.Params.ExpirationDuration - // Compute data size in KB for fee - kb := int(fi.Size()) / 1024 - feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.Itoa(kb)) + // Compute data size in KB for fee, rounding up to avoid underpaying + // Keep consistent with supernode verification which uses ceil(bytes/1024) + sizeBytes := fi.Size() + kb := (sizeBytes + 1023) / 1024 // int64 division + feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.FormatInt(kb, 10)) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action fee: %w", err) } diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index a2006354..2d204c52 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -195,13 +195,16 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action } requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{"fee": requiredFee.String(), "dataBytes": dataSize}) - if action.Price == nil || action.Price.String() != requiredFee.String() { - got := "" - if action.Price != nil { - got = action.Price.String() - } - return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), got), fields) + // Accept paying more than the minimum required fee. Only enforce denom match and Amount >= required. 
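Two small rules drive the fee change in this patch: the client rounds the payload size up to whole kilobytes so it never quotes less than the supernode computes, and the supernode only requires the paid amount to be at least the required amount in the same denom. A plain-Go sketch of both rules (plain integers stand in for the SDK coin types):

```go
package main

import "fmt"

// feeKilobytes rounds a payload size up to whole kilobytes, so a 1-byte tail
// still counts as a full KB and the client never under-quotes the size.
func feeKilobytes(sizeBytes int64) int64 {
	return (sizeBytes + 1023) / 1024
}

// feeAcceptable mirrors the relaxed verification: the denom must match and
// the paid amount only has to be greater than or equal to the required one.
func feeAcceptable(paidDenom string, paidAmount int64, reqDenom string, reqAmount int64) bool {
	return paidDenom == reqDenom && paidAmount >= reqAmount
}

func main() {
	fmt.Println(feeKilobytes(1), feeKilobytes(1024), feeKilobytes(1025)) // 1 1 2
	fmt.Println(feeAcceptable("ulume", 2_000_000, "ulume", 1_500_000))   // true (overpaying is fine)
	fmt.Println(feeAcceptable("ulume", 1_000_000, "ulume", 1_500_000))   // false (below the floor)
}
```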
+ if action.Price == nil { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got ", requiredFee.String()), fields) + } + if action.Price.Denom != requiredFee.Denom { + return task.wrapErr(ctx, "invalid fee denom", errors.Errorf("expected denom %s, got %s", requiredFee.Denom, action.Price.Denom), fields) + } + if action.Price.Amount.LT(requiredFee.Amount) { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), fields) } - logtrace.Info(ctx, "register: verify action fee ok", logtrace.Fields{"required_fee": requiredFee.String()}) + logtrace.Info(ctx, "register: verify action fee ok", logtrace.Fields{"required_fee": requiredFee.String(), "provided_fee": action.Price.String()}) return nil } From 89c4abb4dad68a01694218b0aa6a6ba5f0842207 Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Thu, 23 Oct 2025 14:31:04 +0500 Subject: [PATCH 35/36] Rank supernodes; xor, ram,storage (#214) --- p2p/kademlia/dht.go | 8 +- sdk/task/cascade.go | 33 +++--- sdk/task/download.go | 38 ++----- sdk/task/helpers.go | 22 +--- sdk/task/task.go | 234 ++++++++++++++++++------------------------- 5 files changed, 125 insertions(+), 210 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index bf7a7fd0..13615deb 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -1395,16 +1395,16 @@ func (s *DHT) addNode(ctx context.Context, node *Node) *Node { if node.IP == "" || node.IP == "0.0.0.0" || (!isIntegrationTest && node.IP == "127.0.0.1") { logtrace.Info(ctx, "Rejecting node: invalid IP", logtrace.Fields{ logtrace.FieldModule: "p2p", - "ip": node.IP, - "node": node.String(), - "integration_test": isIntegrationTest, + "ip": node.IP, + "node": node.String(), + "integration_test": isIntegrationTest, }) return nil } if bytes.Equal(node.ID, s.ht.self.ID) { logtrace.Info(ctx, "Rejecting node: is self", logtrace.Fields{ logtrace.FieldModule: "p2p", - "node": node.String(), + "node": node.String(), }) return nil } diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index c10614d3..a2cdcd3a 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -46,12 +46,8 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - // Initial concurrent balance filter (one-time) - supernodes = t.filterByMinBalance(ctx, supernodes) - - // Rank by available free RAM (descending). Unknown RAM stays after known. - supernodes = t.orderByFreeRAM(ctx, supernodes) - t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) + // Log available candidates; streaming will happen within registration + t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes fetched", event.EventData{event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes if err := t.registerWithSupernodes(ctx, supernodes); err != nil { @@ -77,15 +73,18 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum TaskId: t.TaskID, } + // Strict XOR-first qualification and attempts + fileSize := getFileSizeBytes(t.filePath) + var minRam uint64 + if fileSize > 0 { + minRam = uint64(fileSize) * uploadRAMMultiplier + } + ordered := t.orderByXORDistance(ctx, supernodes) + var lastErr error attempted := 0 - // Work on a copy; re-rank by free RAM between attempts - remaining := append(lumera.Supernodes(nil), supernodes...) 
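The new registration flow reduces to: order candidates once by XOR distance, then walk the list, skipping nodes that fail the just-in-time qualification and stopping at the first successful attempt while remembering the last error. A schematic version of that loop with hypothetical helper signatures:

```go
package main

import (
	"errors"
	"fmt"
)

type node struct{ Endpoint string }

// attemptInOrder walks candidates in a fixed order, skips any that no longer
// qualify, and returns nil on the first successful attempt. The last error is
// kept so the caller can report why every candidate failed.
func attemptInOrder(ordered []node, qualifies func(node) bool, attempt func(node) error) error {
	var lastErr error
	for i, n := range ordered {
		if !qualifies(n) {
			continue // e.g. unhealthy, low balance, or below storage/RAM thresholds
		}
		if err := attempt(n); err != nil {
			lastErr = err
			continue
		}
		fmt.Printf("succeeded on candidate %d (%s)\n", i+1, n.Endpoint)
		return nil
	}
	if lastErr != nil {
		return fmt.Errorf("all candidates failed, last error: %w", lastErr)
	}
	return errors.New("no qualifying candidates")
}

func main() {
	nodes := []node{{"a:4444"}, {"b:4444"}, {"c:4444"}}
	err := attemptInOrder(nodes,
		func(n node) bool { return n.Endpoint != "a:4444" }, // placeholder qualification gate
		func(n node) error {
			if n.Endpoint == "b:4444" {
				return errors.New("transient failure")
			}
			return nil
		})
	fmt.Println(err) // <nil>: c:4444 succeeded
}
```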
- for len(remaining) > 0 { - // Re-rank remaining nodes by available RAM (descending) - remaining = t.orderByFreeRAM(ctx, remaining) - sn := remaining[0] - iteration := attempted + 1 + for i, sn := range ordered { + iteration := i + 1 t.LogEvent(ctx, event.SDKRegistrationAttempt, "attempting registration with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, @@ -94,10 +93,8 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum }) // Re-check serving status just-in-time to avoid calling a node that became down/underpeered - if !t.isServing(ctx, sn) { - t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", iteration) - // Drop this node and retry with the rest - remaining = remaining[1:] + // Ensure node qualifies before attempt + if !t.nodeQualifies(ctx, sn, minStorageThresholdBytes, minRam) { continue } @@ -110,8 +107,6 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum event.KeyError: err.Error(), }) lastErr = err - // Drop this node and retry with the rest (re-ranked next loop) - remaining = remaining[1:] continue } diff --git a/sdk/task/download.go b/sdk/task/download.go index ed9d98ef..d9b2d800 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -43,12 +43,8 @@ func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskFailed, "task failed", event.EventData{event.KeyError: err.Error()}) return err } - // Initial concurrent balance filter (one-time) - supernodes = t.filterByMinBalance(ctx, supernodes) - - // Rank by available free RAM (descending). Unknown RAM stays after known. - supernodes = t.orderByFreeRAM(ctx, supernodes) - t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) + // Log available candidates; streaming will happen within download phase + t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes fetched", event.EventData{event.KeyCount: len(supernodes)}) // 2 – download from super-nodes if err := t.downloadFromSupernodes(ctx, supernodes); err != nil { @@ -81,15 +77,13 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern } } - // Try supernodes sequentially with re-ranking between attempts + // Strict XOR-first qualification and attempts (downloads: storage-only threshold) + ordered := t.orderByXORDistance(ctx, supernodes) + var lastErr error - remaining := append(lumera.Supernodes(nil), supernodes...) 
attempted := 0 - for len(remaining) > 0 { - // Re-rank remaining nodes by available RAM (descending) - remaining = t.orderByFreeRAM(ctx, remaining) - sn := remaining[0] - iteration := attempted + 1 + for i, sn := range ordered { + iteration := i + 1 // Log download attempt t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ @@ -98,10 +92,8 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, }) - // Re-check serving status just-in-time to avoid calling a node that became down/underpeered - if !t.isServing(ctx, sn) { - t.logger.Info(ctx, "skip supernode: not serving", "supernode", sn.GrpcEndpoint, "sn-address", sn.CosmosAddress, "iteration", iteration) - remaining = remaining[1:] + // Ensure node qualifies before attempt + if !t.nodeQualifies(ctx, sn, minStorageThresholdBytes, 0) { continue } @@ -115,7 +107,6 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyError: err.Error(), }) lastErr = err - remaining = remaining[1:] continue } @@ -135,17 +126,6 @@ func (t *CascadeDownloadTask) attemptDownload( factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeDownloadRequest, ) error { - // Recheck liveness/busyness just before attempting download to handle delays - if !t.isServing(parent, sn) { - // Emit a concise event; detailed rejection reasons are logged inside isServing - t.LogEvent(parent, event.SDKDownloadFailure, "precheck: supernode not serving/busy", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyReason: "precheck_not_serving_or_busy", - }) - return fmt.Errorf("precheck: supernode not serving/busy") - } - ctx, cancel := context.WithTimeout(parent, downloadTimeout) defer cancel() diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index 1612f12d..2e9ee4c3 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -6,9 +6,7 @@ import ( "fmt" "math/big" "os" - "path/filepath" "sort" - "strings" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" @@ -50,7 +48,7 @@ func (m *ManagerImpl) validateAction(ctx context.Context, actionID string) (lume } // validateSignature verifies the authenticity of a signature against an action's data hash. -// + // This function performs the following steps: // 1. Decodes the CASCADE metadata from the provided Lumera action // 2. Extracts the base64-encoded data hash from the metadata @@ -103,7 +101,7 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio return nil } -// (Removed) Peers connectivity preflight is now enforced during discovery in isServing. 
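The validateSignature steps listed above boil down to: pull the base64-encoded data hash out of the decoded metadata, decode it, and hand the raw hash plus the creator address to a signature verifier. A schematic sketch only; the `Verifier` interface and `checkActionSignature` helper are assumptions for illustration, while the real check goes through the Lumera crypto/keyring adapter:

```go
package main

import (
	"encoding/base64"
	"errors"
	"fmt"
)

// Verifier abstracts whatever checks a signature for a given address
// (keyring lookup, on-chain pubkey, etc.). Hypothetical for this sketch.
type Verifier interface {
	Verify(address string, msg, signature []byte) error
}

// checkActionSignature decodes the base64 data hash carried in the action
// metadata and verifies the supplied signature against it for the creator.
func checkActionSignature(v Verifier, creator, dataHashB64 string, signature []byte) error {
	if dataHashB64 == "" {
		return errors.New("metadata carries no data hash")
	}
	hash, err := base64.StdEncoding.DecodeString(dataHashB64)
	if err != nil {
		return fmt.Errorf("decode data hash: %w", err)
	}
	if err := v.Verify(creator, hash, signature); err != nil {
		return fmt.Errorf("signature verification failed: %w", err)
	}
	return nil
}

// alwaysOK is a stand-in verifier so the example runs on its own.
type alwaysOK struct{}

func (alwaysOK) Verify(string, []byte, []byte) error { return nil }

func main() {
	b64 := base64.StdEncoding.EncodeToString([]byte("example-data-hash"))
	fmt.Println(checkActionSignature(alwaysOK{}, "lumera1creator", b64, []byte("sig")))
}
```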
+// func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID string) (lumera.Action, error) { action, err := m.lumeraClient.GetAction(ctx, actionID) @@ -124,22 +122,6 @@ func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID strin return action, nil } -// Helper function to ensure output path has the correct filename -func ensureOutputPathWithFilename(outputPath, filename string) string { - // If outputPath is empty, just return the filename - if outputPath == "" { - return filename - } - - // Check if the path already ends with the filename - if strings.HasSuffix(outputPath, filename) { - return outputPath - } - - // Otherwise, append the filename to the path - return filepath.Join(outputPath, filename) -} - func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) lumera.Supernodes { if len(sns) == 0 || seed == "" { return sns diff --git a/sdk/task/task.go b/sdk/task/task.go index bce7cf1b..8588c3f3 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -4,8 +4,7 @@ import ( "context" "errors" "fmt" - "sort" - "sync" + "os" sdkmath "cosmossdk.io/math" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -26,6 +25,14 @@ const ( TaskTypeCascade TaskType = "CASCADE" ) +// Package-level thresholds and tuning +const ( + // Minimum available storage required on any volume (bytes) + minStorageThresholdBytes uint64 = 50 * 1024 * 1024 * 1024 // 50 GB + // Upload requires free RAM to be at least 8x the file size + uploadRAMMultiplier uint64 = 8 +) + // EventCallback is a function that processes events from tasks type EventCallback func(ctx context.Context, e event.Event) @@ -76,35 +83,6 @@ func (t *BaseTask) LogEvent(ctx context.Context, evt event.EventType, msg string t.emitEvent(ctx, evt, additionalInfo) } -// isServing pings the super-node once with a short timeout. -func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { - ctx, cancel := context.WithTimeout(parent, connectionTimeout) - defer cancel() - - client, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, - }).CreateClient(ctx, sn) - if err != nil { - t.logger.Info(ctx, "reject supernode: client create failed", "reason", err.Error(), "endpoint", sn.GrpcEndpoint, "cosmos", sn.CosmosAddress) - return false - } - defer client.Close(ctx) - - // First check gRPC health - resp, err := client.HealthCheck(ctx) - if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { - statusStr := "nil" - if resp != nil { - statusStr = resp.Status.String() - } - t.logger.Info(ctx, "reject supernode: health not SERVING", "error", err, "status", statusStr) - return false - } - return true -} - -// No health, status, balance or load checks are done here. func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Supernodes, error) { sns, err := t.client.GetSupernodes(ctx, height) if err != nil { @@ -116,128 +94,108 @@ func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Su return sns, nil } -func (t *BaseTask) orderByFreeRAM(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { +// orderByXORDistance ranks supernodes by XOR distance to the action's data hash. +// If decoding metadata fails, falls back to using the action ID as the seed. 
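orderSupernodesByDeterministicDistance itself is not shown in this hunk, but the XOR-distance idea it is used for can be sketched independently: hash the seed (data hash or action ID) and each node key, XOR the two digests as big integers, and sort ascending so every client derives the same order. A guess at the general shape, not the project's exact implementation:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
	"sort"
)

// orderByXOR sorts keys by XOR distance between sha256(seed) and sha256(key).
// Every caller that uses the same seed derives exactly the same order.
func orderByXOR(seed string, keys []string) []string {
	s := sha256.Sum256([]byte(seed))
	seedInt := new(big.Int).SetBytes(s[:])

	dist := func(k string) *big.Int {
		h := sha256.Sum256([]byte(k))
		return new(big.Int).Xor(seedInt, new(big.Int).SetBytes(h[:]))
	}

	out := append([]string(nil), keys...)
	sort.Slice(out, func(i, j int) bool {
		return dist(out[i]).Cmp(dist(out[j])) < 0
	})
	return out
}

func main() {
	nodes := []string{"lumera1aaa", "lumera1bbb", "lumera1ccc"}
	fmt.Println(orderByXOR("data-hash-or-action-id", nodes))
}
```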
+func (t *BaseTask) orderByXORDistance(ctx context.Context, sns lumera.Supernodes) lumera.Supernodes { if len(sns) <= 1 { return sns } - - type scored struct { - idx int - sn lumera.Supernode - ramGb float64 - known bool - } - - out := make([]scored, len(sns)) - // Best-effort parallel status fetch; do not filter or fail. - // We intentionally avoid health/peer/balance checks here. - for i, sn := range sns { - out[i] = scored{idx: i, sn: sn, ramGb: 0, known: false} - } - - // Query in parallel with a short timeout to avoid blocking too long per node - // Reuse the connectionTimeout constant for symmetry with health probes. - type result struct { - i int - ram float64 - ok bool - } - ch := make(chan result, len(sns)) - for i, sn := range sns { - i, sn := i, sn - go func() { - cctx, cancel := context.WithTimeout(parent, connectionTimeout) - defer cancel() - client, err := net.NewClientFactory(cctx, t.logger, t.keyring, t.client, net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, - }).CreateClient(cctx, sn) - if err != nil { - ch <- result{i: i, ram: 0, ok: false} - return - } - defer client.Close(cctx) - - status, err := client.GetSupernodeStatus(cctx) - if err != nil || status == nil { - ch <- result{i: i, ram: 0, ok: false} - return - } - res := status.GetResources() - if res == nil || res.GetMemory() == nil { - ch <- result{i: i, ram: 0, ok: false} - return - } - ch <- result{i: i, ram: res.GetMemory().GetAvailableGb(), ok: true} - }() - } - // Collect results with a cap bounded by len(sns) - for k := 0; k < len(sns); k++ { - r := <-ch - if r.ok { - out[r.i].ramGb = r.ram - out[r.i].known = true + // Try to decode the action metadata to get the Cascade data hash as seed + seed := t.ActionID + if t.client != nil && (t.Action.Metadata != nil || t.Action.ActionType != "") { + if meta, err := t.client.DecodeCascadeMetadata(ctx, t.Action); err == nil && meta.DataHash != "" { + seed = meta.DataHash } } + return orderSupernodesByDeterministicDistance(seed, sns) +} - // Known RAM first, then by RAM desc. For ties and unknowns, preserve original order. - sort.SliceStable(out, func(i, j int) bool { - ai, aj := out[i], out[j] - if ai.known != aj.known { - return ai.known - } - if ai.known && aj.known && ai.ramGb != aj.ramGb { - return ai.ramGb > aj.ramGb - } - return ai.idx < aj.idx - }) +// filterByResourceThresholds removes supernodes that do not satisfy minimum +// available storage and free RAM thresholds. +// - minStorageBytes: minimum available storage on any volume (bytes) +// - minFreeRamBytes: minimum free RAM (bytes). If 0, RAM check is skipped. - res := make(lumera.Supernodes, len(out)) - for i := range out { - res[i] = out[i].sn +// helper: get file size (bytes). returns 0 on error +func getFileSizeBytes(p string) int64 { + fi, err := os.Stat(p) + if err != nil { + return 0 } - return res + return fi.Size() } -// filterByMinBalance filters supernodes by requiring at least a minimum balance -// in the default fee denom. This runs concurrently and is intended to be used -// once during initial discovery only. -func (t *BaseTask) filterByMinBalance(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { - if len(sns) == 0 { - return sns +// nodeQualifies performs balance, health, and resource checks for a supernode. 
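nodeQualifies, defined just below, chains three inexpensive gates (minimum balance, gRPC health, resource thresholds) and short-circuits on the first failure so the cheapest checks run first. A compact sketch of that gating pattern, with the three checks abstracted as hypothetical callbacks:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// gates holds the three per-node checks in the order they should run:
// cheapest and most selective first, so later checks are often skipped.
type gates struct {
	balanceOK   func(ctx context.Context) bool // e.g. on-chain balance >= 1 LUME
	healthOK    func(ctx context.Context) bool // e.g. gRPC health returns SERVING
	resourcesOK func(ctx context.Context) bool // e.g. storage/RAM thresholds met
}

// qualifies short-circuits on the first failing gate under one short deadline.
func qualifies(parent context.Context, g gates) bool {
	ctx, cancel := context.WithTimeout(parent, 3*time.Second)
	defer cancel()
	return g.balanceOK(ctx) && g.healthOK(ctx) && g.resourcesOK(ctx)
}

func main() {
	ok := qualifies(context.Background(), gates{
		balanceOK:   func(context.Context) bool { return true },
		healthOK:    func(context.Context) bool { return true },
		resourcesOK: func(context.Context) bool { return false },
	})
	fmt.Println(ok) // false: the resource gate rejects the node
}
```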
+func (t *BaseTask) nodeQualifies(parent context.Context, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + // 1) Balance check (require at least 1 LUME) + if !t.balanceOK(parent, sn) { + return false } - // Require at least 1 LUME = 10^6 ulume by default. - min := sdkmath.NewInt(1_000_000) + + // 2) Health + resources via a single client session + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + defer cancel() + client, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, net.FactoryConfig{ + LocalCosmosAddress: t.config.Account.LocalCosmosAddress, + PeerType: t.config.Account.PeerType, + }).CreateClient(ctx, sn) + if err != nil { + return false + } + defer client.Close(ctx) + + // Health check + h, err := client.HealthCheck(ctx) + if err != nil || h == nil || h.Status != grpc_health_v1.HealthCheckResponse_SERVING { + return false + } + + // Resource thresholds + return t.resourcesOK(ctx, client, sn, minStorageBytes, minFreeRamBytes) +} + +func (t *BaseTask) balanceOK(parent context.Context, sn lumera.Supernode) bool { + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + defer cancel() + min := sdkmath.NewInt(1_000_000) // 1 LUME in ulume denom := txmod.DefaultFeeDenom + bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) + if err != nil || bal == nil || bal.Balance == nil { + return false + } + if bal.Balance.Amount.LT(min) { + return false + } + return true +} - keep := make([]bool, len(sns)) - var wg sync.WaitGroup - wg.Add(len(sns)) - for i, sn := range sns { - i, sn := i, sn - go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(parent, connectionTimeout) - defer cancel() - bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) - if err != nil || bal == nil || bal.Balance == nil { - t.logger.Info(ctx, "reject supernode: balance fetch failed or empty", "error", err, "address", sn.CosmosAddress) - return - } - if bal.Balance.Amount.LT(min) { - t.logger.Info(ctx, "reject supernode: insufficient balance", "amount", bal.Balance.Amount.String(), "min", min.String(), "address", sn.CosmosAddress) - return +func (t *BaseTask) resourcesOK(ctx context.Context, client net.SupernodeClient, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + status, err := client.GetSupernodeStatus(ctx) + if err != nil || status == nil || status.Resources == nil { + return false + } + // Storage: any volume must satisfy available >= minStorageBytes + if minStorageBytes > 0 { + ok := false + for _, vol := range status.Resources.StorageVolumes { + if vol != nil && vol.AvailableBytes >= minStorageBytes { + ok = true + break } - keep[i] = true - }() + } + if !ok { + return false + } } - wg.Wait() - - out := make(lumera.Supernodes, 0, len(sns)) - for i, sn := range sns { - if keep[i] { - out = append(out, sn) + // RAM: available_gb must be >= required GiB + if minFreeRamBytes > 0 { + mem := status.Resources.Memory + if mem == nil { + return false + } + requiredGiB := float64(minFreeRamBytes) / (1024.0 * 1024.0 * 1024.0) + if mem.AvailableGb < requiredGiB { + return false } } - return out + return true } From 4e1a72655aa17176eb05944eaa18a9ea2c04fc0b Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 23 Oct 2025 14:56:11 +0500 Subject: [PATCH 36/36] exclude sn free resource check in sdk --- sdk/task/task.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/task/task.go b/sdk/task/task.go index 8588c3f3..eea513fc 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -169,6 +169,10 
@@ func (t *BaseTask) balanceOK(parent context.Context, sn lumera.Supernode) bool { } func (t *BaseTask) resourcesOK(ctx context.Context, client net.SupernodeClient, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + // In tests, skip resource thresholds (keep balance + health via nodeQualifies) + if os.Getenv("INTEGRATION_TEST") == "true" { + return true + } status, err := client.GetSupernodeStatus(ctx) if err != nil || status == nil || status.Resources == nil { return false
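The resource gate sketched below mirrors the shape of resourcesOK as it stands after this patch: at least one storage volume must have enough free space (50 GiB here), the reported free RAM in GiB must cover the requirement derived from the upload size, and an environment flag can bypass the whole check in test runs. Names and constants are illustrative, not the SDK's exports:

```go
package main

import (
	"fmt"
	"os"
)

// minStorageBytes: at least one volume must have this much free space.
const minStorageBytes uint64 = 50 * 1024 * 1024 * 1024 // 50 GiB

// resourcesMeetThresholds mirrors the shape of the SDK-side gate: any volume
// with enough free space, and enough free RAM for the pending upload.
// availableRAMGb is the GiB figure reported by the status RPC; a
// minFreeRAMBytes of 0 skips the RAM check (as the download path does).
func resourcesMeetThresholds(volumeFreeBytes []uint64, availableRAMGb float64, minFreeRAMBytes uint64) bool {
	// Escape hatch for test environments that run on small machines.
	if os.Getenv("INTEGRATION_TEST") == "true" {
		return true
	}

	storageOK := false
	for _, free := range volumeFreeBytes {
		if free >= minStorageBytes {
			storageOK = true
			break
		}
	}
	if !storageOK {
		return false
	}

	if minFreeRAMBytes > 0 {
		requiredGiB := float64(minFreeRAMBytes) / (1024.0 * 1024.0 * 1024.0)
		if availableRAMGb < requiredGiB {
			return false
		}
	}
	return true
}

func main() {
	// A 1 GiB upload with the 8x rule needs roughly 8 GiB of free RAM.
	required := uint64(1<<30) * 8
	fmt.Println(resourcesMeetThresholds([]uint64{60 << 30}, 16.0, required)) // true
	fmt.Println(resourcesMeetThresholds([]uint64{10 << 30}, 16.0, required)) // false: no volume large enough
}
```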