diff --git a/cmd/sncli/cli/cli.go b/cmd/sncli/cli/cli.go index 45f88a6c..306531f0 100644 --- a/cmd/sncli/cli/cli.go +++ b/cmd/sncli/cli/cli.go @@ -2,9 +2,9 @@ package cli import ( "context" + "fmt" "log" "os" - "fmt" "time" "github.com/BurntSushi/toml" @@ -23,7 +23,7 @@ const ( // defaultConfigFileName is the default path to the configuration file. defaultConfigFileName = "config.toml" - // defaultConfigFolder is the default folder for configuration files. + // defaultConfigFolder is the default folder for configuration files. defaultConfigFolder = "~/.sncli" ) @@ -74,9 +74,9 @@ func (c *CLI) loadCLIConfig() error { _, err := toml.DecodeFile(path, &c.CfgOpts) if err != nil { if os.IsNotExist(err) { - return fmt.Errorf("Config file not found at %s", path) + return fmt.Errorf("config file not found at %s", path) } - return fmt.Errorf("Failed to load config from %s: %v", path, err) + return fmt.Errorf("failed to load config from %s: %v", path, err) } c.ConfigPath = path @@ -120,9 +120,8 @@ func (c *CLI) Initialize() error { // Create Lumera client adapter c.SdkConfig = sdkcfg.NewConfig( sdkcfg.AccountConfig{ - LocalCosmosAddress: c.CfgOpts.Keyring.LocalAddress, - KeyName: c.CfgOpts.Keyring.KeyName, - Keyring: c.kr, + KeyName: c.CfgOpts.Keyring.KeyName, + Keyring: c.kr, }, sdkcfg.LumeraConfig{ GRPCAddr: c.CfgOpts.Lumera.GRPCAddr, @@ -167,18 +166,20 @@ func (c *CLI) snClientInit() { GrpcEndpoint: c.CfgOpts.Supernode.GRPCEndpoint, } - clientFactory := sdknet.NewClientFactory( + clientFactory, err := sdknet.NewClientFactory( context.Background(), sdklog.NewNoopLogger(), c.kr, c.lumeraClient, sdknet.FactoryConfig{ - LocalCosmosAddress: c.CfgOpts.Keyring.LocalAddress, - PeerType: 1, // Simplenode + KeyName: c.SdkConfig.Account.KeyName, + PeerType: 1, // Simplenode }, ) + if err != nil { + log.Fatalf("Failed to create client factory: %v", err) + } - var err error c.snClient, err = clientFactory.CreateClient(context.Background(), supernode) if err != nil { 
log.Fatalf("Supernode client init failed: %v", err) @@ -190,4 +191,4 @@ func (c *CLI) P2PPing(timeout time.Duration) error { return fmt.Errorf("P2P: not initialized") } return c.p2p.Ping(timeout) -} \ No newline at end of file +} diff --git a/cmd/sncli/cli/cmd_get_status.go b/cmd/sncli/cli/cmd_get_status.go index 303d20cd..c6d67ce7 100644 --- a/cmd/sncli/cli/cmd_get_status.go +++ b/cmd/sncli/cli/cmd_get_status.go @@ -11,7 +11,7 @@ func (c *CLI) GetSupernodeStatus() error { resp, err := c.snClient.GetSupernodeStatus(context.Background()) if err != nil { - return fmt.Errorf("Get supernode status failed: %v", err) + return fmt.Errorf("get supernode status failed: %v", err) } fmt.Println("Supernode Status:") fmt.Printf(" Version: %s\n", resp.Version) diff --git a/cmd/sncli/cli/cmd_health_check.go b/cmd/sncli/cli/cmd_health_check.go index 160ddcb5..03e9eb12 100644 --- a/cmd/sncli/cli/cmd_health_check.go +++ b/cmd/sncli/cli/cmd_health_check.go @@ -11,7 +11,7 @@ func (c *CLI) HealthCheck() error { resp, err := c.snClient.HealthCheck(context.Background()) if err != nil { - return fmt.Errorf("Supernode health check failed: %v", err) + return fmt.Errorf("supernode health check failed: %v", err) } fmt.Println("✅ Health status:", resp.Status) return nil diff --git a/cmd/sncli/cli/utils.go b/cmd/sncli/cli/utils.go index b691d4aa..15e893a3 100644 --- a/cmd/sncli/cli/utils.go +++ b/cmd/sncli/cli/utils.go @@ -5,9 +5,6 @@ import ( "os" "path/filepath" "strings" - "net" - "strconv" - "fmt" ) func NormalizePath(path string) string { @@ -17,11 +14,11 @@ func NormalizePath(path string) string { if strings.HasPrefix(path, "~") { home, err := os.UserHomeDir() if err != nil { - log.Fatalf("Unable to resolve home directory: %v", err) + log.Fatalf("unable to resolve home directory: %v", err) } path = filepath.Join(home, path[1:]) } - path = filepath.Clean(path) + path = filepath.Clean(path) return path } @@ -34,15 +31,3 @@ func processConfigPath(path string) string { path = 
filepath.Clean(path) return path } - -func splitHostPort(hp string) (string, int, error) { - host, portStr, err := net.SplitHostPort(hp) - if err != nil { - return "", 0, err - } - p, err := strconv.Atoi(portStr) - if err != nil || p <= 0 || p > 65535 { - return "", 0, fmt.Errorf("invalid port: %s", portStr) - } - return host, p, nil -} \ No newline at end of file diff --git a/cmd/sncli/go.mod b/cmd/sncli/go.mod index 009f0840..a2f77a98 100644 --- a/cmd/sncli/go.mod +++ b/cmd/sncli/go.mod @@ -10,7 +10,7 @@ replace ( require ( github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c - github.com/LumeraProtocol/lumera v1.8.0 + github.com/LumeraProtocol/lumera v1.8.4 github.com/LumeraProtocol/supernode/v2 v2.3.88 github.com/cosmos/cosmos-sdk v0.53.0 github.com/spf13/cobra v1.10.1 diff --git a/cmd/sncli/go.sum b/cmd/sncli/go.sum index 332e380f..90812d2e 100644 --- a/cmd/sncli/go.sum +++ b/cmd/sncli/go.sum @@ -76,8 +76,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.8.0 h1:0t5/6qOSs9wKti7utPAWo9Jq8wk2X+L/eEaH8flk/Hc= -github.com/LumeraProtocol/lumera v1.8.0/go.mod h1:38uAZxxleZyXaWKbqOQKwjw7CSX92lTxdF+B7c4SRPw= +github.com/LumeraProtocol/lumera v1.8.4 h1:6XzLS9gd0m3lOnppNS05WuZx4VCBEGvUN/KpVkSjqro= +github.com/LumeraProtocol/lumera v1.8.4/go.mod h1:twrSLfuXcHvmfQoN5e02Bg7rfeevUjF34SVqEJIvH1E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= 
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= diff --git a/sdk/README.md b/sdk/README.md index cea41654..c79db01e 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -74,7 +74,6 @@ import ( // Account configuration accountConfig := config.AccountConfig{ - LocalCosmosAddress: "lumera1abc...", // Your cosmos address KeyName: "my-key", // Key name in keyring Keyring: keyring, // Cosmos SDK keyring instance // PeerType is optional - defaults to SIMPLENODE when omitted (recommended for most users) @@ -98,7 +97,6 @@ if err := sdkConfig.Validate(); err != nil { ### Required Fields **AccountConfig:** -- `LocalCosmosAddress`: Your Lumera cosmos address (e.g., "lumera1abc...") - `KeyName`: Name of the key in your keyring - `Keyring`: Initialized Cosmos SDK keyring containing your keys - `PeerType`: Peer type from securekeyx (optional, defaults to SIMPLENODE when left empty, which is suitable for most use cases) @@ -129,7 +127,6 @@ kr, err := keyring.New("app-name", "file", "/path/to/keys", nil) ```go config := config.NewConfig( config.AccountConfig{ - LocalCosmosAddress: "lumera1...", KeyName: "my-key", Keyring: keyring, }, @@ -158,7 +155,6 @@ import ( // Set up configuration accountConfig := config.AccountConfig{ - LocalCosmosAddress: "lumera1...", // Your cosmos address KeyName: "your-key", // Name of the key in your keyring Keyring: keyring, // Cosmos SDK keyring instance } diff --git a/sdk/action/client.go b/sdk/action/client.go index 0a0a06b9..741adbde 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -60,6 +60,7 @@ type ClientImpl struct { logger log.Logger keyring keyring.Keyring lumeraClient lumera.Client + signerAddr string } // Verify interface compliance at compile time @@ -71,6 +72,11 @@ func NewClient(ctx context.Context, config config.Config, logger log.Logger) (Cl logger = log.NewNoopLogger() } + addr, err := keyringpkg.GetAddress(config.Account.Keyring, config.Account.KeyName) + if err 
!= nil { + return nil, fmt.Errorf("resolve signer address: %w", err) + } + // Create lumera client once lumeraClient, err := lumera.NewAdapter(ctx, lumera.ConfigParams{ @@ -96,6 +102,7 @@ func NewClient(ctx context.Context, config config.Config, logger log.Logger) (Cl logger: logger, keyring: config.Account.Keyring, lumeraClient: lumeraClient, + signerAddr: addr.String(), }, nil } @@ -196,10 +203,14 @@ func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress st } // Create network client factory - clientFactory := net.NewClientFactory(ctx, c.logger, c.keyring, c.lumeraClient, net.FactoryConfig{ - LocalCosmosAddress: c.config.Account.LocalCosmosAddress, - PeerType: c.config.Account.PeerType, + clientFactory, err := net.NewClientFactory(ctx, c.logger, c.keyring, c.lumeraClient, net.FactoryConfig{ + KeyName: c.config.Account.KeyName, + PeerType: c.config.Account.PeerType, }) + if err != nil { + c.logger.Error(ctx, "Failed to create client factory", "error", err) + return nil, fmt.Errorf("failed to create client factory: %w", err) + } // Create client for the specific supernode supernodeClient, err := clientFactory.CreateClient(ctx, lumeraSupernode) @@ -278,13 +289,18 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // Create signatures from the layout struct // get bech32 address for this key - indexSignatureFormat, _, err := cascadekit.CreateSignaturesWithKeyringADR36( + indexSignatureFormat, _, err := cascadekit.CreateSignaturesWithKeyring( layout, c.keyring, c.config.Account.KeyName, ic, max, ) + + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) + } + // Compute data hash (blake3) as base64 using a streaming file hash to avoid loading entire file h, err := utils.Blake3HashFile(filePath) if err != nil { @@ -350,3 +366,7 @@ func (c *ClientImpl) GenerateDownloadSignature(ctx context.Context, actionID, cr } return base64.StdEncoding.EncodeToString(sig), nil } 
+ +func (c *ClientImpl) signerAddress() string { + return c.signerAddr +} diff --git a/sdk/config/config.go b/sdk/config/config.go index 899ac70d..99059aca 100644 --- a/sdk/config/config.go +++ b/sdk/config/config.go @@ -9,10 +9,9 @@ import ( // AccountConfig holds peer-to-peer addresses, ports, etc. type AccountConfig struct { - LocalCosmosAddress string - KeyName string - Keyring cosmoskeyring.Keyring - PeerType securekeyx.PeerType + KeyName string + Keyring cosmoskeyring.Keyring + PeerType securekeyx.PeerType } // LumeraConfig wraps all chain-specific dials. @@ -35,8 +34,6 @@ func NewConfig(account AccountConfig, lumera LumeraConfig) Config { func (c Config) Validate() error { switch { - case c.Account.LocalCosmosAddress == "": - return errors.New("config: Network.LocalCosmosAddress is required") case c.Lumera.GRPCAddr == "": return errors.New("config: Lumera.GRPCAddr is required") case c.Lumera.ChainID == "": diff --git a/sdk/net/factory.go b/sdk/net/factory.go index 80dfd13a..b0356997 100644 --- a/sdk/net/factory.go +++ b/sdk/net/factory.go @@ -9,13 +9,14 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/log" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) // FactoryConfig contains configuration for the ClientFactory type FactoryConfig struct { - LocalCosmosAddress string - PeerType securekeyx.PeerType + KeyName string + PeerType securekeyx.PeerType } // ClientFactory creates and manages supernode clients @@ -25,16 +26,23 @@ type ClientFactory struct { clientOptions *client.ClientOptions config FactoryConfig lumeraClient lumera.Client + signerAddr string } // NewClientFactory creates a new client factory with the provided dependencies -func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Keyring, lumeraClient lumera.Client, config FactoryConfig) *ClientFactory { +func NewClientFactory(ctx context.Context, 
logger log.Logger, keyring keyring.Keyring, lumeraClient lumera.Client, config FactoryConfig) (*ClientFactory, error) { if logger == nil { logger = log.NewNoopLogger() } - logger.Debug(ctx, "Creating supernode client factory", - "localAddress", config.LocalCosmosAddress) + addr, err := keyringpkg.GetAddress(keyring, config.KeyName) + if err != nil { + logger.Error(ctx, "failed to resolve signer address from keyring", + map[string]interface{}{"key_name": config.KeyName, "error": err.Error()}, + ) + + return nil, fmt.Errorf("resolve signer address from keyring: %w", err) + } // Tuned for 1GB max files with 4MB chunks // Reduce in-flight memory by aligning windows and msg sizes to chunk size. @@ -52,7 +60,8 @@ func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Ke clientOptions: opts, config: config, lumeraClient: lumeraClient, - } + signerAddr: addr.String(), + }, nil } // CreateClient creates a client for a specific supernode diff --git a/sdk/net/impl.go b/sdk/net/impl.go index 45d154f1..b54a08bb 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -14,6 +14,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/log" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/cosmos/cosmos-sdk/crypto/keyring" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" @@ -49,19 +50,21 @@ func NewSupernodeClient(ctx context.Context, logger log.Logger, keyring keyring. 
if keyring == nil { return nil, fmt.Errorf("keyring cannot be nil") } - if factoryConfig.LocalCosmosAddress == "" { - return nil, fmt.Errorf("local cosmos address cannot be empty") - } if factoryConfig.PeerType == 0 { factoryConfig.PeerType = securekeyx.Simplenode } + addr, err := keyringpkg.GetAddress(keyring, factoryConfig.KeyName) + if err != nil { + return nil, fmt.Errorf("resolve signer address: %w", err) + } + // Create client credentials clientCreds, err := ltc.NewClientCreds(&ltc.ClientOptions{ CommonOptions: ltc.CommonOptions{ Keyring: keyring, - LocalIdentity: factoryConfig.LocalCosmosAddress, + LocalIdentity: addr.String(), PeerType: factoryConfig.PeerType, Validator: lumeraClient, }, @@ -78,7 +81,7 @@ func NewSupernodeClient(ctx context.Context, logger log.Logger, keyring keyring. logger.Debug(ctx, "Preparing to connect to supernode securely", "endpoint", targetSupernode.GrpcEndpoint, "target_id", targetSupernode.CosmosAddress, - "local_id", factoryConfig.LocalCosmosAddress, "peer_type", factoryConfig.PeerType) + "local_id", addr.String(), "peer_type", factoryConfig.PeerType) // Use provided client options or defaults options := clientOptions diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index eb52a380..c467e339 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -55,7 +55,12 @@ func (t *CascadeTask) Run(ctx context.Context) error { // 2 - Pre-filter: balance & health concurrently -> XOR rank, then hand over originalCount := len(supernodes) - supernodes, preClients := t.filterEligibleSupernodesParallel(ctx, supernodes) + supernodes, preClients, err := t.filterEligibleSupernodesParallel(ctx, supernodes) + if err != nil { + t.LogEvent(ctx, event.SDKTaskFailed, "Task failed during pre-filtering", event.EventData{event.KeyError: err.Error()}) + return err + } + supernodes = t.orderByXORDistance(supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes filtered", event.EventData{event.KeyTotal: originalCount, event.KeyCount:
len(supernodes)}) @@ -72,10 +77,14 @@ func (t *CascadeTask) Run(ctx context.Context) error { func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lumera.Supernodes, preClients map[string]net.SupernodeClient) error { factoryCfg := net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, + KeyName: t.config.Account.KeyName, + PeerType: t.config.Account.PeerType, + } + clientFactory, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, factoryCfg) + if err != nil { + t.LogEvent(ctx, event.SDKTaskFailed, "Failed to create client factory", event.EventData{event.KeyError: err.Error()}) + return fmt.Errorf("failed to create client factory: %w", err) } - clientFactory := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, factoryCfg) req := &supernodeservice.CascadeSupernodeRegisterRequest{ FilePath: t.filePath, diff --git a/sdk/task/download.go b/sdk/task/download.go index c9a5a4b1..bc8ca06f 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -45,7 +45,12 @@ func (t *CascadeDownloadTask) Run(ctx context.Context) error { } // 2 - Pre-filter: balance & health concurrently -> XOR rank originalCount := len(supernodes) - supernodes, preClients := t.filterEligibleSupernodesParallel(ctx, supernodes) + supernodes, preClients, err := t.filterEligibleSupernodesParallel(ctx, supernodes) + if err != nil { + t.LogEvent(ctx, event.SDKTaskFailed, "task failed during pre-filtering", event.EventData{event.KeyError: err.Error()}) + return err + } + supernodes = t.orderByXORDistance(supernodes) t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes filtered", event.EventData{event.KeyTotal: originalCount, event.KeyCount: len(supernodes)}) @@ -61,10 +66,14 @@ func (t *CascadeDownloadTask) Run(ctx context.Context) error { func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supernodes lumera.Supernodes, preClients map[string]net.SupernodeClient) error { 
factoryCfg := net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, + KeyName: t.config.Account.KeyName, + PeerType: t.config.Account.PeerType, + } + clientFactory, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, factoryCfg) + if err != nil { + t.LogEvent(ctx, event.SDKTaskFailed, "Failed to create client factory", event.EventData{event.KeyError: err.Error()}) + return fmt.Errorf("failed to create client factory: %w", err) } - clientFactory := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, factoryCfg) // Ensure any unused preClients are closed when we return defer func() { diff --git a/sdk/task/task.go b/sdk/task/task.go index cfa8c4ca..a8423427 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -115,9 +115,9 @@ func (t *BaseTask) orderByXORDistance(sns lumera.Supernodes) lumera.Supernodes { // - For each node, run Health (incl. dial) and Balance concurrently under one timeout. // - Early-cancel sibling work on definitive failure to save time. // - Reuse healthy client connections during registration to skip a second dial. 
-func (t *BaseTask) filterEligibleSupernodesParallel(parent context.Context, sns lumera.Supernodes) (lumera.Supernodes, map[string]net.SupernodeClient) { +func (t *BaseTask) filterEligibleSupernodesParallel(parent context.Context, sns lumera.Supernodes) (lumera.Supernodes, map[string]net.SupernodeClient, error) { if len(sns) == 0 { - return sns, nil + return sns, nil, nil } // Step 0 — shared state for this pass @@ -133,10 +133,14 @@ func (t *BaseTask) filterEligibleSupernodesParallel(parent context.Context, sns denom := txmod.DefaultFeeDenom factoryCfg := net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, + KeyName: t.config.Account.KeyName, + PeerType: t.config.Account.PeerType, + } + clientFactory, err := net.NewClientFactory(parent, t.logger, t.keyring, t.client, factoryCfg) + if err != nil { + t.LogEvent(parent, event.SDKTaskFailed, "Failed to create client factory", event.EventData{event.KeyError: err.Error()}) + return nil, nil, fmt.Errorf("failed to create client factory: %w", err) } - clientFactory := net.NewClientFactory(parent, t.logger, t.keyring, t.client, factoryCfg) // Step 1 — spawn bounded goroutines, one per supernode for i, sn := range sns { @@ -273,5 +277,5 @@ func (t *BaseTask) filterEligibleSupernodesParallel(parent context.Context, sns out = append(out, sn) } } - return out, preClients + return out, preClients, nil } diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index b9af06d2..41ed525a 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -272,7 +272,7 @@ func TestCascadeE2E(t *testing.T) { // Build action client for metadata generation and cascade operations // Use the same account that submits RequestAction so signatures match the on-chain creator - accConfig := sdkconfig.AccountConfig{LocalCosmosAddress: userAddress, KeyName: userKeyName, Keyring: keplrKeyring} + accConfig := 
sdkconfig.AccountConfig{KeyName: userKeyName, Keyring: keplrKeyring} lumraConfig := sdkconfig.LumeraConfig{GRPCAddr: lumeraGRPCAddr, ChainID: lumeraChainID} actionConfig := sdkconfig.Config{Account: accConfig, Lumera: lumraConfig} actionClient, err := action.NewClient(context.Background(), actionConfig, nil) @@ -297,7 +297,10 @@ func TestCascadeE2E(t *testing.T) { t.Logf("Requesting cascade action with metadata: %s", metadata) t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, autoPrice, expirationTime) - response, _ := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) + response, err := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) + require.NoError(t, err, "RequestAction failed") + + require.NotNil(t, response, "RequestAction returned nil response") txresp := response.TxResponse @@ -356,38 +359,50 @@ func TestCascadeE2E(t *testing.T) { // Step 9: Subscribe to all events and extract tx hash // --------------------------------------- - // Channels to receive async signals - txHashCh := make(chan string, 1) - completionCh := make(chan bool, 1) - errCh := make(chan string, 1) - - // Subscribe to ALL events (non-blocking sends to avoid handler stalls) - err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { - // Log every event for debugging and capture key ones - t.Logf("SDK event: type=%s data=%v", e.Type, e.Data) - // Only capture TxhasReceived events - if e.Type == event.SDKTaskTxHashReceived { - if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { - // Non-blocking send; drop if buffer full - select { case txHashCh <- txHash: default: } - } - } - - // Also monitor for task completion - if e.Type == event.SDKTaskCompleted { - // Non-blocking send; drop if buffer full - select { case completionCh <- true: default: } - } - // Capture task failures and propagate error message to main goroutine - if
e.Type == event.SDKTaskFailed { - if msg, ok := e.Data[event.KeyError].(string); ok && msg != "" { - select { case errCh <- msg: default: } - } else { - select { case errCh <- "task failed (no error message)" : default: } - } - } - }) - require.NoError(t, err, "Failed to subscribe to events") + // Channels to receive async signals + txHashCh := make(chan string, 1) + completionCh := make(chan bool, 1) + errCh := make(chan string, 1) + + // Subscribe to ALL events (non-blocking sends to avoid handler stalls) + err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { + // Log every event for debugging and capture key ones + t.Logf("SDK event: type=%s data=%v", e.Type, e.Data) + // Only capture TxhasReceived events + if e.Type == event.SDKTaskTxHashReceived { + if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { + // Non-blocking send; drop if buffer full + select { + case txHashCh <- txHash: + default: + } + } + } + + // Also monitor for task completion + if e.Type == event.SDKTaskCompleted { + // Non-blocking send; drop if buffer full + select { + case completionCh <- true: + default: + } + } + // Capture task failures and propagate error message to main goroutine + if e.Type == event.SDKTaskFailed { + if msg, ok := e.Data[event.KeyError].(string); ok && msg != "" { + select { + case errCh <- msg: + default: + } + } else { + select { + case errCh <- "task failed (no error message)": + default: + } + } + } + }) + require.NoError(t, err, "Failed to subscribe to events") // Start cascade operation @@ -402,26 +417,28 @@ func TestCascadeE2E(t *testing.T) { require.NoError(t, err, "Failed to start cascade operation") t.Logf("Cascade operation started with task ID: %s", taskID) - // Wait for both tx-hash and completion with a timeout - var recievedhash string - done := false - timeout := time.After(2 * time.Minute) + // Wait for both tx-hash and completion with a timeout + var recievedhash string + done := 
false + timeout := time.After(2 * time.Minute) waitLoop: - for { - if recievedhash != "" && done { - break waitLoop - } - select { - case h := <-txHashCh: - if recievedhash == "" { recievedhash = h } - case <-completionCh: - done = true - case emsg := <-errCh: - t.Fatalf("cascade task reported failure: %s", emsg) - case <-timeout: - t.Fatalf("timeout waiting for events; recievedhash=%q done=%v", recievedhash, done) - } - } + for { + if recievedhash != "" && done { + break waitLoop + } + select { + case h := <-txHashCh: + if recievedhash == "" { + recievedhash = h + } + case <-completionCh: + done = true + case emsg := <-errCh: + t.Fatalf("cascade task reported failure: %s", emsg) + case <-timeout: + t.Fatalf("timeout waiting for events; recievedhash=%q done=%v", recievedhash, done) + } + } t.Logf("Received transaction hash: %s", recievedhash)