Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updated protos and refactored code to work with new protos #14

Merged
merged 4 commits into from
Dec 14, 2022

Conversation

waeljammal
Copy link
Collaborator

No description provided.

@waeljammal
Copy link
Collaborator Author

go-cover-view

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/codec.go
   1: package spark_v1
   2:
   3: import (
   4: 	"encoding/json"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
// MarshalBinary encodes data as JSON and returns the raw bytes.
func MarshalBinary(data interface{}) ([]byte, error) {
	return json.Marshal(data)
}
  11:
O 12: func UnmarshalBinaryTo(data []byte, out interface{}, mimeType string) error {
O 13: 	if mimeType == "" {
O 14: 		return sparkv1.SerdesMap[MimeTypeJSON].Unmarshal(data, &out)
O 15: 	} else {
O 16: 		return sparkv1.SerdesMap[mimeType].Unmarshal(data, &out)
O 17: 	}
  18: }
  19:
O 20: func ConvertBytes(data []byte, mimeType string) (out []byte, err error) {
O 21: 	var value interface{}
O 22: 	err = UnmarshalBinaryTo(data, &value, mimeType)
X 23: 	if err != nil {
X 24: 		return
X 25: 	}
  26:
O 27: 	switch v := value.(type) {
O 28: 	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
O 29: 		return data, nil
O 30: 	case string:
O 31: 		return []byte(v), nil
X 32: 	default:
X 33: 		err = UnmarshalBinaryTo(data, &out, mimeType)
  34: 	}
  35:
X 36: 	return
  37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/errors.go
    1: package spark_v1
    2:
    3: import (
    4: 	"encoding/json"
    5: 	"errors"
    6: 	"fmt"
    7: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    8: 	"google.golang.org/protobuf/types/known/structpb"
    9: 	"time"
   10: )
   11:
   12: /************************************************************************/
   13: // TYPES
   14: /************************************************************************/
   15:
   16: type ErrorOption = func(err *stageError) *stageError
   17:
   18: type stageError struct {
   19: 	err       error
   20: 	errorType sparkv1.ErrorType
   21: 	errorCode uint32
   22: 	metadata  map[string]any
   23: 	retry     *RetryConfig
   24: }
   25:
   26: type RetryConfig struct {
   27: 	times         uint
   28: 	backoffMillis uint
   29: }
   30:
   31: /************************************************************************/
   32: // ERRORS
   33: /************************************************************************/
   34:
   35: var (
   36: 	ErrStageDoesNotExist        = errors.New("stage does not exists")
   37: 	ErrBindValueFailed          = errors.New("bind value failed")
   38: 	ErrVariableNotFound         = errors.New("variable not found")
   39: 	ErrStageNotFoundInNodeChain = errors.New("stage not found in the node chain")
   40: 	ErrConditionalStageSkipped  = errors.New("conditional stage execution")
   41: 	ErrChainIsNotValid          = errors.New("chain is not valid")
   42: 	ErrInputVariableNotFound    = errors.New("input variable not found")
   43:
   44: 	errorTypeToStageStatusMapper = map[sparkv1.ErrorType]sparkv1.StageStatus{
   45: 		sparkv1.ErrorType_ERROR_TYPE_RETRY:              sparkv1.StageStatus_STAGE_FAILED,
   46: 		sparkv1.ErrorType_ERROR_TYPE_SKIP:               sparkv1.StageStatus_STAGE_SKIPPED,
   47: 		sparkv1.ErrorType_ERROR_TYPE_CANCELLED:          sparkv1.StageStatus_STAGE_CANCELED,
   48: 		sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED: sparkv1.StageStatus_STAGE_FAILED,
   49: 	}
   50: )
   51:
   52: /************************************************************************/
   53: // ERROR FACTORIES
   54: /************************************************************************/
   55:
X  56: func newErrStageNotFoundInNodeChain(stage string) error {
X  57: 	return fmt.Errorf("%w: %s", ErrStageNotFoundInNodeChain, stage)
X  58: }
   59:
X  60: func newErrConditionalStageSkipped(stageName string) error {
X  61: 	return fmt.Errorf("%w: stage '%s' skipped", ErrConditionalStageSkipped, stageName)
X  62: }
   63:
O  64: func NewStageError(err error, opts ...ErrorOption) StageError {
O  65: 	stg := &stageError{err: err}
O  66: 	for _, opt := range opts {
O  67: 		stg = opt(stg)
O  68: 	}
O  69: 	return stg
   70: }
   71:
   72: /************************************************************************/
   73: // STAGE ERROR ENVELOPE
   74: /************************************************************************/
   75:
O  76: func (s *stageError) ErrorType() sparkv1.ErrorType {
O  77: 	return s.errorType
O  78: }
   79:
O  80: func (s *stageError) Code() uint32 {
O  81: 	return s.errorCode
O  82: }
   83:
O  84: func (s *stageError) Error() string {
O  85: 	return s.err.Error()
O  86: }
   87:
O  88: func (s *stageError) Metadata() map[string]any {
O  89: 	return s.metadata
O  90: }
   91:
O  92: func (s *stageError) ToErrorMessage() *sparkv1.Error {
O  93: 	err := &sparkv1.Error{
O  94: 		Error:     s.err.Error(),
O  95: 		ErrorCode: s.errorCode,
O  96: 		ErrorType: s.errorType,
O  97: 	}
O  98: 	if s.metadata != nil {
O  99: 		err.Metadata, _ = structpb.NewValue(s.metadata)
O 100: 	}
X 101: 	if s.retry != nil {
X 102: 		err.Retry = &sparkv1.RetryStrategy{Backoff: uint32(s.retry.backoffMillis), Count: uint32(s.retry.times)}
X 103: 	}
O 104: 	return err
  105: }
  106:
  107: /************************************************************************/
  108: // STAGE ERROR OPTIONS
  109: /************************************************************************/
  110:
X 111: func WithErrorCode(code uint32) ErrorOption {
X 112: 	return func(err *stageError) *stageError {
X 113: 		err.errorCode = code
X 114: 		return err
X 115: 	}
  116: }
  117:
X 118: func WithMetadata(metadata any) ErrorOption {
X 119: 	return func(err *stageError) *stageError {
X 120: 		err.parseMetadata(metadata)
X 121: 		return err
X 122: 	}
  123: }
  124:
X 125: func WithRetry(times uint, backoffMillis time.Duration) ErrorOption {
X 126: 	return func(err *stageError) *stageError {
X 127: 		err.retry = &RetryConfig{times, uint(backoffMillis.Milliseconds())}
X 128: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_RETRY
X 129: 		return err
X 130: 	}
  131: }
  132:
O 133: func WithSkip() ErrorOption {
O 134: 	return func(err *stageError) *stageError {
O 135: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_SKIP
O 136: 		return err
O 137: 	}
  138: }
  139:
O 140: func WithCancel() ErrorOption {
O 141: 	return func(err *stageError) *stageError {
O 142: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_CANCELLED
O 143: 		err.metadata = map[string]any{"reason": "canceled in stage"}
O 144: 		return err
O 145: 	}
  146: }
  147:
O 148: func WithFatal() ErrorOption {
O 149: 	return func(err *stageError) *stageError {
O 150: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_FATAL
O 151: 		return err
O 152: 	}
  153: }
  154:
O 155: func withErrorType(errorType sparkv1.ErrorType) ErrorOption {
O 156: 	return func(err *stageError) *stageError {
O 157: 		err.errorType = errorType
O 158: 		return err
O 159: 	}
  160: }
  161:
X 162: func (s *stageError) parseMetadata(metadata any) {
X 163: 	m := map[string]any{}
X 164: 	if metadata != nil {
X 165: 		mdBytes, _ := json.Marshal(metadata)
X 166: 		_ = json.Unmarshal(mdBytes, &m)
X 167: 	}
X 168: 	s.metadata = m
  169: }
  170:
  171: /************************************************************************/
  172: // HELPERS
  173: /************************************************************************/
  174:
O 175: func errorTypeToStageStatus(errType sparkv1.ErrorType) sparkv1.StageStatus {
O 176: 	if err, ok := errorTypeToStageStatusMapper[errType]; ok {
O 177: 		return err
O 178: 	}
O 179: 	return sparkv1.StageStatus_STAGE_FAILED
  180: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/executor.go
    1: package spark_v1
    2:
    3: import sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    4:
    5: const (
    6: 	stageLogField  = "stage"
    7: 	jobKeyLogField = "job_key"
    8: )
    9:
O  10: func (c *chain) execute(ctx SparkContext) StageError {
O  11: 	n, err := c.getNodeToResume(ctx.LastActiveStage())
X  12: 	if err != nil {
X  13: 		return NewStageError(err)
X  14: 	}
O  15: 	return c.runner(ctx, n)
   16: }
   17:
   18: //nolint:cyclop
O  19: func (c *chain) runner(ctx SparkContext, node *node) StageError {
O  20: 	stages := getStagesToResume(node, ctx.LastActiveStage())
O  21: 	for _, stg := range stages {
O  22: 		select {
X  23: 		case <-ctx.Ctx().Done():
X  24: 			return nil
O  25: 		default:
O  26: 			ctx.Log().AddFields(stageLogField, stg.name).AddFields(jobKeyLogField, ctx.JobKey())
O  27:
X  28: 			if err := stg.ApplyConditionalExecutionOptions(ctx, stg.name); err != nil {
X  29: 				er := updateStage(ctx, stg.name, withStageError(err))
X  30: 				if er != nil {
X  31: 					ctx.Log().Error(er, "error updating stage status to 'started'")
X  32: 					return NewStageError(er)
X  33: 				}
X  34: 				continue
   35: 			}
   36:
O  37: 			er := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED))
O  38:
X  39: 			if er != nil {
X  40: 				ctx.Log().Error(er, "error updating stage status to 'started'")
X  41: 				return NewStageError(er)
X  42: 			}
   43:
O  44: 			var result any
O  45: 			var stageErr StageError
O  46:
O  47: 			// stage execution is delegated in which case call the delegate
O  48: 			// instead and expect that it will invoke the stage and return a result, error
O  49: 			if ctx.delegateStage() != nil {
O  50: 				result, stageErr = ctx.delegateStage()(NewStageContext(ctx, stg.name), stg.cb)
O  51: 			} else {
O  52: 				result, stageErr = stg.cb(NewStageContext(ctx, stg.name))
O  53: 			}
   54:
O  55: 			if err := c.handleStageError(ctx, node, stg, stageErr); err != nil {
O  56: 				if err.ErrorType() == sparkv1.ErrorType_ERROR_TYPE_SKIP {
O  57: 					continue
   58: 				}
O  59: 				return err
   60: 			}
   61:
X  62: 			if err := storeStageResult(ctx, stg, result); err != nil {
X  63: 				return err
X  64: 			}
   65:
X  66: 			if err := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_COMPLETED)); err != nil {
X  67: 				ctx.Log().Error(err, "error setting the stage status to 'completed'")
X  68: 				return NewStageError(err)
X  69: 			}
   70: 		}
   71: 	}
   72:
O  73: 	select {
X  74: 	case <-ctx.Ctx().Done():
X  75: 		return nil
O  76: 	default:
O  77: 		if node.complete != nil {
X  78: 			if er := updateStage(ctx, node.complete.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED)); er != nil {
X  79: 				ctx.Log().Error(er, "error setting the completed stage status to 'started'")
X  80: 				return NewStageError(er)
X  81: 			}
   82:
O  83: 			var stageErr StageError
O  84:
O  85: 			if ctx.delegateComplete() != nil {
O  86: 				stageErr = ctx.delegateComplete()(NewCompleteContext(ctx, node.complete.name), node.complete.cb)
O  87: 			} else {
O  88: 				stageErr = node.complete.cb(NewCompleteContext(ctx, node.complete.name))
O  89: 			}
   90:
X  91: 			if e := updateStage(ctx, node.complete.name, withStageStatusOrError(sparkv1.StageStatus_STAGE_COMPLETED, stageErr)); e != nil {
X  92: 				ctx.Log().Error(e, "error setting the completed stage status to 'completed'")
X  93: 				return NewStageError(e)
X  94: 			}
O  95: 			return stageErr
   96: 		}
   97: 	}
   98:
X  99: 	return nil
  100: }
  101:
  102: //nolint:cyclop
O 103: func (c *chain) handleStageError(ctx SparkContext, node *node, stg *stage, err StageError) StageError {
O 104: 	if err == nil {
O 105: 		return nil
O 106: 	}
  107:
X 108: 	if e := updateStage(ctx, stg.name, withStageError(err)); e != nil {
X 109: 		ctx.Log().Error(err, "error updating stage status")
X 110: 		return NewStageError(e)
X 111: 	}
  112:
O 113: 	switch err.ErrorType() {
O 114: 	case sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED:
O 115: 		if node.compensate != nil {
O 116: 			e := c.runner(ctx.WithoutLastActiveStage(), node.compensate)
X 117: 			if e != nil {
X 118: 				return e
X 119: 			}
  120: 		}
O 121: 		return err
O 122: 	case sparkv1.ErrorType_ERROR_TYPE_CANCELLED:
O 123: 		if node.cancel != nil {
O 124: 			e := c.runner(ctx.WithoutLastActiveStage(), node.cancel)
X 125: 			if e != nil {
X 126: 				return e
X 127: 			}
  128: 		}
O 129: 		return err
X 130: 	case sparkv1.ErrorType_ERROR_TYPE_RETRY:
X 131: 		return err
O 132: 	case sparkv1.ErrorType_ERROR_TYPE_SKIP:
O 133: 		return err
O 134: 	case sparkv1.ErrorType_ERROR_TYPE_FATAL:
O 135: 		fallthrough
O 136: 	default:
O 137: 		ctx.Log().Error(err, "unsupported error type returned from stage '%s'", stg.name)
O 138: 		return NewStageError(err, withErrorType(sparkv1.ErrorType_ERROR_TYPE_FATAL))
  139: 	}
  140: }
  141:
O 142: func storeStageResult(ctx SparkContext, stg *stage, result any) StageError {
O 143: 	if result != nil { //nolint:nestif
O 144: 		req, err := newSetStageResultReq(ctx.JobKey(), stg.name, result)
X 145: 		if err != nil {
X 146: 			ctx.Log().Error(err, "error creating set stage status request")
X 147: 			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
X 148: 				ctx.Log().Error(err, "error updating stage status")
X 149: 				return NewStageError(e)
X 150: 			}
X 151: 			return NewStageError(err)
  152: 		}
X 153: 		if err := ctx.StageProgressHandler().SetResult(req); err != nil {
X 154: 			ctx.Log().Error(err, "error on set stage status request")
X 155: 			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
X 156: 				ctx.Log().Error(err, "error updating stage status")
X 157: 				return NewStageError(e)
X 158: 			}
X 159: 			return NewStageError(err)
  160: 		}
  161: 	}
O 162: 	return nil
  163: }
  164:
  165: type updateStageOption = func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest
  166:
O 167: func withStageStatusOrError(status sparkv1.StageStatus, err StageError) updateStageOption {
O 168: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 169: 		if err != nil {
X 170: 			return withStageError(err)(stage)
X 171: 		}
O 172: 		return withStageStatus(status)(stage)
  173: 	}
  174: }
  175:
O 176: func withStageStatus(status sparkv1.StageStatus) updateStageOption {
O 177: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
O 178: 		stage.Status = status
O 179: 		return stage
O 180: 	}
  181: }
  182:
O 183: func withStageError(err StageError) updateStageOption {
O 184: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 185: 		if err == nil {
X 186: 			return stage
X 187: 		}
O 188: 		stage.Status = errorTypeToStageStatus(err.ErrorType())
O 189: 		stage.Err = err.ToErrorMessage()
O 190: 		return stage
  191: 	}
  192: }
  193:
X 194: func withError(err error) updateStageOption {
X 195: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 196: 		if err == nil {
X 197: 			return stage
X 198: 		}
X 199: 		stage.Status = sparkv1.StageStatus_STAGE_FAILED
X 200: 		stage.Err = NewStageError(err).ToErrorMessage()
X 201: 		return stage
  202: 	}
  203: }
  204:
O 205: func updateStage(ctx SparkContext, name string, opts ...updateStageOption) error {
O 206: 	req := &sparkv1.SetStageStatusRequest{Key: ctx.JobKey(), Name: name}
O 207: 	for _, opt := range opts {
O 208: 		req = opt(req)
O 209: 	}
O 210: 	return ctx.StageProgressHandler().Set(req)
  211: }
  212:
O 213: func getStagesToResume(n *node, lastActiveStage *sparkv1.LastActiveStage) []*stage {
O 214: 	if lastActiveStage == nil {
O 215: 		return n.stages
O 216: 	}
X 217: 	var stages []*stage
X 218: 	for idx, stg := range n.stages {
X 219: 		if stg.name == lastActiveStage.Name {
X 220: 			stages = append(stages, n.stages[idx:]...)
X 221: 		}
  222: 	}
X 223: 	return stages
  224: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/extensions.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: )
    7:
    8: /************************************************************************/
    9: // FACTORIES
   10: /************************************************************************/
   11:
O  12: func newSetStageResultReq(jobKey, name string, data interface{}) (*sparkv1.SetStageResultRequest, error) {
O  13: 	b, err := MarshalBinary(data)
O  14:
O  15: 	return &sparkv1.SetStageResultRequest{
O  16: 		Key:  jobKey,
O  17: 		Name: name,
O  18: 		Data: b,
O  19: 	}, err
O  20: }
   21:
O  22: func newVariable(name, mimeType string, value interface{}) (*sparkv1.Variable, error) {
O  23: 	pbValue, err := MarshalBinary(value)
X  24: 	if err != nil {
X  25: 		return nil, fmt.Errorf("error creating variable named '%s': %w", name, err)
X  26: 	}
O  27: 	return &sparkv1.Variable{
O  28: 		Data:     pbValue,
O  29: 		MimeType: mimeType,
O  30: 	}, nil
   31: }
   32:
X  33: func newStageResultReq(jobKey, stageName string) *sparkv1.GetStageResultRequest {
X  34: 	return &sparkv1.GetStageResultRequest{
X  35: 		Name: stageName,
X  36: 		Key:  jobKey,
X  37: 	}
X  38: }
   39:
X  40: func newSetStageStatusReq(jobKey, stageName string, status sparkv1.StageStatus, err ...*sparkv1.Error) *sparkv1.SetStageStatusRequest {
X  41: 	sssr := &sparkv1.SetStageStatusRequest{
X  42: 		Name:   stageName,
X  43: 		Key:    jobKey,
X  44: 		Status: status,
X  45: 	}
X  46: 	if len(err) > 0 {
X  47: 		sssr.Err = err[0]
X  48: 	}
X  49: 	return sssr
   50: }
   51:
X  52: func newGetVariablesRequest(jobKey string, names ...string) *sparkv1.GetInputsRequest {
X  53: 	vr := &sparkv1.GetInputsRequest{
X  54: 		Key: jobKey,
X  55: 	}
X  56: 	vr.Names = append(vr.Names, names...)
X  57: 	return vr
X  58: }
   59:
X  60: func newSetVariablesRequest(jobKey string, variables ...*Var) (*sparkv1.SetOutputsRequest, error) {
X  61: 	m := map[string]*sparkv1.Variable{}
X  62: 	for _, v := range variables {
X  63: 		variable, err := newVariable(v.Name, v.MimeType, v.Value)
X  64: 		if err != nil {
X  65: 			return nil, err
X  66: 		}
X  67: 		m[v.Name] = variable
   68: 	}
X  69: 	return &sparkv1.SetOutputsRequest{Key: jobKey, Variables: m}, nil
   70: }
   71:
X  72: func newGetStageStatusReq(jobKey, stageName string) *sparkv1.GetStageStatusRequest {
X  73: 	return &sparkv1.GetStageStatusRequest{Key: jobKey, Name: stageName}
X  74: }
   75:
   76: /************************************************************************/
   77: // INPUT
   78: /************************************************************************/
   79:
   80: type input struct {
   81: 	variable *sparkv1.Variable
   82: 	err      error
   83: }
   84:
O  85: func newInput(variable *sparkv1.Variable, err error) *input {
O  86: 	return &input{variable: variable, err: err}
O  87: }
   88:
X  89: func (i *input) String() string {
X  90: 	return string(i.variable.Data)
X  91: }
   92:
O  93: func (i *input) Raw() ([]byte, error) {
X  94: 	if i.err != nil {
X  95: 		return nil, i.err
X  96: 	}
   97:
O  98: 	return ConvertBytes(i.variable.Data, i.variable.MimeType)
   99: }
  100:
O 101: func (i *input) Bind(a interface{}) error {
X 102: 	if i.err != nil {
X 103: 		return i.err
X 104: 	}
  105:
X 106: 	if err := UnmarshalBinaryTo(i.variable.Data, a, ""); err != nil {
X 107: 		return err
X 108: 	}
  109:
O 110: 	return nil
  111: }
  112:
  113: /************************************************************************/
  114: // BATCH INPUTS
  115: /************************************************************************/
  116:
  117: type inputs struct {
  118: 	vars map[string]*sparkv1.Variable
  119: 	err  error
  120: }
  121:
O 122: func newInputs(err error, vars map[string]*sparkv1.Variable) Inputs {
O 123: 	return &inputs{vars: vars, err: err}
O 124: }
  125:
O 126: func (v inputs) Get(name string) Bindable {
O 127: 	found, ok := v.vars[name]
O 128: 	if ok {
O 129: 		return newInput(found, v.err)
O 130: 	}
X 131: 	err := v.err
X 132: 	if err == nil {
X 133: 		err = ErrInputVariableNotFound
X 134: 	}
X 135: 	return newInput(nil, v.err)
  136: }
  137:
X 138: func (v inputs) Error() error {
X 139: 	return v.err
X 140: }
  141:
  142: /************************************************************************/
  143: // STAGE RESULT
  144: /************************************************************************/
  145:
  146: type result struct {
  147: 	result *sparkv1.GetStageResultResponse
  148: 	err    error
  149: }
  150:
O 151: func newResult(err error, r *sparkv1.GetStageResultResponse) Bindable {
O 152: 	return &result{
O 153: 		result: r,
O 154: 		err:    err,
O 155: 	}
O 156: }
  157:
O 158: func (r *result) Raw() ([]byte, error) {
X 159: 	if r.err != nil {
X 160: 		return nil, r.err
X 161: 	}
  162:
O 163: 	return ConvertBytes(r.result.Data, "")
  164: }
  165:
O 166: func (r *result) Bind(a interface{}) error {
X 167: 	if r.err != nil {
X 168: 		return r.err
X 169: 	}
  170:
O 171: 	return UnmarshalBinaryTo(r.result.Data, a, "")
  172: }
  173:
X 174: func (r *result) String() string {
X 175: 	return string(r.result.GetData())
X 176: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/helpers.go
   1: package spark_v1
   2:
   3: import (
   4: 	"errors"
   5: )
   6:
O  7: var CompleteSuccess = func(ctx CompleteContext) StageError {
O  8: 	return nil
O  9: }
  10:
X 11: var CompleteError = func(ctx CompleteContext) StageError {
X 12: 	return NewStageError(errors.New("complete failed"))
X 13: }
  14:
// appendIfNotNil appends only the non-nil pointers from items to array.
func appendIfNotNil[T any](array []*T, items ...*T) []*T {
	for _, candidate := range items {
		if candidate == nil {
			continue
		}
		array = append(array, candidate)
	}
	return array
}
  23:
O 24: func addBreadcrumb(nodes ...*node) {
O 25: 	var nextNodes []*node
O 26: 	for _, n := range nodes {
O 27: 		n.cancel.appendBreadcrumb(cancelNodeType, n.breadcrumb)
O 28: 		n.compensate.appendBreadcrumb(compensateNodeType, n.breadcrumb)
O 29: 		nextNodes = appendIfNotNil(nextNodes, n.compensate, n.cancel)
O 30: 	}
O 31: 	if len(nextNodes) > 0 {
O 32: 		addBreadcrumb(nextNodes...)
O 33: 	}
  34: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type variableHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcIOHandler(client sparkv1.ManagerServiceClient) IOHandler {
X 13: 	return variableHandler{client}
X 14: }
  15:
X 16: func (g variableHandler) Inputs(jobKey string, names ...string) Inputs {
X 17: 	variables, err := g.client.GetInputs(context.Background(), newGetVariablesRequest(jobKey, names...))
X 18: 	if err != nil {
X 19: 		return newInputs(err, nil)
X 20: 	}
  21:
X 22: 	return newInputs(err, variables.Variables)
  23: }
  24:
X 25: func (g variableHandler) Input(jobKey, name string) Input {
X 26: 	return g.Inputs(jobKey, name).Get(name)
X 27: }
  28:
X 29: func (g variableHandler) Output(jobKey string, variables ...*Var) error {
X 30: 	request, err := newSetVariablesRequest(jobKey, variables...)
X 31: 	if err != nil {
X 32: 		return err
X 33: 	}
X 34: 	_, err = g.client.SetOutputs(context.Background(), request)
X 35: 	return err
  36: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_memory.go
   1: package spark_v1
   2:
   3: import (
   4: 	"fmt"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"testing"
   7: )
   8:
   9: type inMemoryIOHandler struct {
  10: 	variables map[string]*Var
  11: 	t         *testing.T
  12: }
  13:
O 14: func NewInMemoryIOHandler(t *testing.T) TestIOHandler {
O 15: 	i := &inMemoryIOHandler{t: t, variables: map[string]*Var{}}
O 16: 	return i
O 17: }
  18:
O 19: func (i *inMemoryIOHandler) Inputs(jobKey string, names ...string) Inputs {
O 20: 	var (
O 21: 		vars = map[string]*sparkv1.Variable{}
O 22: 		err  error
O 23: 	)
O 24: 	for _, n := range names {
O 25: 		key := i.key(jobKey, n)
O 26: 		if v, ok := i.variables[key]; ok {
O 27: 			var va *sparkv1.Variable
O 28: 			va, err = newVariable(v.Name, v.MimeType, v.Value)
O 29: 			vars[v.Name] = va
O 30: 		}
  31: 	}
X 32: 	if len(vars) == 0 {
X 33: 		i.t.Fatal("no variables found for the params: ")
X 34: 	}
O 35: 	return newInputs(err, vars)
  36: }
  37:
O 38: func (i *inMemoryIOHandler) Input(jobKey, name string) Input {
O 39: 	inputs := i.Inputs(jobKey, name)
O 40: 	return inputs.Get(name)
O 41: }
  42:
X 43: func (i *inMemoryIOHandler) Output(jobKey string, variables ...*Var) error {
X 44: 	for _, v := range variables {
X 45: 		i.variables[i.key(jobKey, v.Name)] = v
X 46: 	}
X 47: 	return nil
  48: }
  49:
O 50: func (i *inMemoryIOHandler) SetVar(jobKey string, v *Var) {
O 51: 	i.variables[i.key(jobKey, v.Name)] = v
O 52: }
  53:
O 54: func (i *inMemoryIOHandler) key(jobKey, name string) string {
O 55: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 56: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type stageProgressHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcStageProgressHandler(client sparkv1.ManagerServiceClient) StageProgressHandler {
X 13: 	return &stageProgressHandler{client: client}
X 14: }
  15:
X 16: func (g *stageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
X 17: 	resp, err := g.client.GetStageStatus(context.Background(), newGetStageStatusReq(jobKey, name))
X 18: 	return &resp.Status, err
X 19: }
  20:
X 21: func (g *stageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X 22: 	_, err := g.client.SetStageStatus(context.Background(), stageStatus)
X 23: 	return err
X 24: }
  25:
X 26: func (g *stageProgressHandler) GetResult(jobKey, name string) Bindable {
X 27: 	result, err := g.client.GetStageResult(context.Background(), newStageResultReq(jobKey, name))
X 28: 	if err != nil {
X 29: 		return newResult(err, nil)
X 30: 	}
X 31: 	return newResult(nil, result)
  32: }
  33:
X 34: func (g *stageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X 35: 	_, err := g.client.SetStageResult(context.Background(), result)
X 36: 	return err
X 37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_memory.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: 	"testing"
    7:
    8: 	"github.com/stretchr/testify/assert"
    9: )
   10:
   11: type InMemoryStageProgressHandler struct {
   12: 	t                  *testing.T
   13: 	stages             map[string]*sparkv1.SetStageStatusRequest
   14: 	results            map[string]*sparkv1.SetStageResultRequest
   15: 	behaviourSet       map[string]StageBehaviourParams
   16: 	behaviourSetResult map[string]ResultBehaviourParams
   17: }
   18:
O  19: func NewInMemoryStageProgressHandler(t *testing.T, seeds ...any) TestStageProgressHandler {
O  20: 	handler := InMemoryStageProgressHandler{t,
O  21: 		map[string]*sparkv1.SetStageStatusRequest{}, map[string]*sparkv1.SetStageResultRequest{},
O  22: 		map[string]StageBehaviourParams{}, map[string]ResultBehaviourParams{}}
O  23:
O  24: 	return &handler
O  25: }
   26:
O  27: func (i *InMemoryStageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
O  28: 	if stage, ok := i.stages[i.key(jobKey, name)]; ok {
O  29: 		return &stage.Status, nil
O  30: 	}
X  31: 	i.t.Fatalf("stage status no found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  32: 	return nil, nil
   33: }
   34:
O  35: func (i *InMemoryStageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X  36: 	if bp, ok := i.behaviourSet[stageStatus.Name]; ok {
X  37: 		if bp.status == stageStatus.Status && bp.err != nil {
X  38: 			return bp.err
X  39: 		}
   40: 	}
O  41: 	i.stages[i.key(stageStatus.Key, stageStatus.Name)] = stageStatus
O  42: 	return nil
   43: }
   44:
O  45: func (i *InMemoryStageProgressHandler) GetResult(jobKey, name string) Bindable {
O  46: 	if variable, ok := i.results[i.key(jobKey, name)]; ok {
O  47: 		return newResult(nil, &sparkv1.GetStageResultResponse{
O  48: 			Data: variable.Data,
O  49: 		})
O  50: 	}
X  51: 	i.t.Fatalf("stage result not found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  52: 	return nil
   53: }
   54:
O  55: func (i *InMemoryStageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X  56: 	if br, ok := i.behaviourSetResult[result.Name]; ok {
X  57: 		if br.jobKey == result.GetKey() && br.name == result.Name && br.err != nil {
X  58: 			return br.err
X  59: 		}
   60: 	}
O  61: 	i.results[i.key(result.GetKey(), result.Name)] = result
O  62: 	return nil
   63: }
   64:
X  65: func (i *InMemoryStageProgressHandler) AddBehaviour() *Behaviour {
X  66: 	return &Behaviour{i: i}
X  67: }
   68:
X  69: func (i *InMemoryStageProgressHandler) ResetBehaviour() {
X  70: 	i.behaviourSet = map[string]StageBehaviourParams{}
X  71: }
   72:
O  73: func (i *InMemoryStageProgressHandler) AssertStageCompleted(jobKey, stageName string) {
O  74: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_COMPLETED)
O  75: }
   76:
X  77: func (i *InMemoryStageProgressHandler) AssertStageStarted(jobKey, stageName string) {
X  78: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_STARTED)
X  79: }
   80:
O  81: func (i *InMemoryStageProgressHandler) AssertStageSkipped(jobKey, stageName string) {
O  82: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_SKIPPED)
O  83: }
   84:
O  85: func (i *InMemoryStageProgressHandler) AssertStageCancelled(jobKey, stageName string) {
O  86: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_CANCELED)
O  87: }
   88:
O  89: func (i *InMemoryStageProgressHandler) AssertStageFailed(jobKey, stageName string) {
O  90: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_FAILED)
O  91: }
   92:
X  93: func (i *InMemoryStageProgressHandler) AssertStageResult(jobKey, stageName string, expectedStageResult any) {
X  94: 	r := i.GetResult(jobKey, stageName)
X  95: 	resB, err := r.Raw()
X  96: 	if err != nil {
X  97: 		i.t.Error(err)
X  98: 		return
X  99: 	}
X 100: 	req, err := newSetStageResultReq(jobKey, MimeTypeJSON, expectedStageResult)
X 101: 	if err != nil {
X 102: 		i.t.Error(err)
X 103: 		return
X 104: 	}
X 105: 	assert.Equal(i.t, req.Data, resB)
  106: }
  107:
O 108: func (i *InMemoryStageProgressHandler) key(jobKey, name string) string {
O 109: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 110: }
  111:
O 112: func (i *InMemoryStageProgressHandler) assertStageStatus(jobKey, stageName string, expectedStatus sparkv1.StageStatus) {
O 113: 	status, err := i.Get(jobKey, stageName)
X 114: 	if err != nil {
X 115: 		i.t.Error(err)
X 116: 		return
X 117: 	}
O 118: 	assert.Equal(i.t, &expectedStatus, status, "spark status expected: '%s' got: '%s'", expectedStatus, status)
  119: }
  120:
  121: type Behaviour struct {
  122: 	i *InMemoryStageProgressHandler
  123: }
  124:
X 125: func (b *Behaviour) Set(stageName string, status sparkv1.StageStatus, err error) *InMemoryStageProgressHandler {
X 126: 	b.i.behaviourSet[stageName] = StageBehaviourParams{err: err, status: status}
X 127: 	return b.i
X 128: }
  129:
X 130: func (b *Behaviour) SetResult(jobKey, stageName string, err error) *InMemoryStageProgressHandler {
X 131: 	b.i.behaviourSetResult[stageName] = ResultBehaviourParams{jobKey: jobKey, name: stageName, err: err}
X 132: 	return b.i
X 133: }
  134:
  135: type StageBehaviourParams struct {
  136: 	err    error
  137: 	status sparkv1.StageStatus
  138: }
  139:
  140: type ResultBehaviourParams struct {
  141: 	jobKey string
  142: 	name   string
  143: 	err    error
  144: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/server.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"net"
   7: 	"time"
   8:
   9: 	"google.golang.org/grpc"
  10: 	"google.golang.org/grpc/reflection"
  11: )
  12:
  13: /************************************************************************/
  14: // CONSTANTS
  15: /************************************************************************/
  16:
  17: var connectionTimeout = time.Second * 10
  18:
  19: /************************************************************************/
  20: // TYPES
  21: /************************************************************************/
  22:
  23: type server struct {
  24: 	config *config
  25: 	worker Worker
  26: 	svr    *grpc.Server
  27: }
  28:
  29: /************************************************************************/
  30: // SERVER
  31: /************************************************************************/
  32:
X 33: func newServer(cfg *config, worker Worker) *server {
X 34: 	return &server{config: cfg, worker: worker}
X 35: }
  36:
X 37: func (s *server) start() error {
X 38: 	// LOGGER SAMPLE >> add .Fields(fields) with the spark name on it
X 39: 	log := NewLogger()
X 40:
X 41: 	// nosemgrep
X 42: 	s.svr = grpc.NewServer(grpc.ConnectionTimeout(connectionTimeout))
X 43: 	sparkv1.RegisterAgentServiceServer(s.svr, s)
X 44:
X 45: 	reflection.Register(s.svr)
X 46:
X 47: 	listener, err := net.Listen("tcp", s.config.serverAddress())
X 48: 	if err != nil {
X 49: 		log.Error(err, "error setting up the listener")
X 50: 		return err
X 51: 	}
  52:
  53: 	// nosemgrep
X 54: 	if err = s.svr.Serve(listener); err != nil {
X 55: 		log.Error(err, "error starting the server")
X 56: 		return err
X 57: 	}
X 58: 	return nil
  59: }
  60:
X 61: func (s *server) stop() {
X 62: 	if s.svr != nil {
X 63: 		s.svr.GracefulStop()
X 64: 	}
  65: }
  66:
  67: /************************************************************************/
  68: // RPC IMPLEMENTATIONS
  69: /************************************************************************/
  70:
X 71: func (s *server) ExecuteJob(ctx context.Context, request *sparkv1.ExecuteJobRequest) (*sparkv1.Void, error) {
X 72: 	jobContext := NewSparkMetadata(ctx, request.Key, request.CorrelationId, request.TransactionId, nil)
X 73: 	go func() { // TODO goroutine pool
X 74: 		_ = s.worker.Execute(jobContext)
X 75: 	}()
X 76: 	return &sparkv1.Void{}, nil
  77: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/variable.go
   1: package spark_v1
   2:
   3: type Var struct {
   4: 	Name     string
   5: 	MimeType string
   6: 	Value    interface{}
   7: }
   8:
O  9: func NewVar(name, mimeType string, value interface{}) *Var {
O 10: 	return &Var{name, mimeType, value}
O 11: }

1 similar comment
@waeljammal
Copy link
Collaborator Author

go-cover-view

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/codec.go
   1: package spark_v1
   2:
   3: import (
   4: 	"encoding/json"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
O  8: func MarshalBinary(data interface{}) ([]byte, error) {
O  9: 	return json.Marshal(data)
O 10: }
  11:
O 12: func UnmarshalBinaryTo(data []byte, out interface{}, mimeType string) error {
O 13: 	if mimeType == "" {
O 14: 		return sparkv1.SerdesMap[MimeTypeJSON].Unmarshal(data, &out)
O 15: 	} else {
O 16: 		return sparkv1.SerdesMap[mimeType].Unmarshal(data, &out)
O 17: 	}
  18: }
  19:
O 20: func ConvertBytes(data []byte, mimeType string) (out []byte, err error) {
O 21: 	var value interface{}
O 22: 	err = UnmarshalBinaryTo(data, &value, mimeType)
X 23: 	if err != nil {
X 24: 		return
X 25: 	}
  26:
O 27: 	switch v := value.(type) {
O 28: 	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
O 29: 		return data, nil
O 30: 	case string:
O 31: 		return []byte(v), nil
X 32: 	default:
X 33: 		err = UnmarshalBinaryTo(data, &out, mimeType)
  34: 	}
  35:
X 36: 	return
  37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/errors.go
    1: package spark_v1
    2:
    3: import (
    4: 	"encoding/json"
    5: 	"errors"
    6: 	"fmt"
    7: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    8: 	"google.golang.org/protobuf/types/known/structpb"
    9: 	"time"
   10: )
   11:
   12: /************************************************************************/
   13: // TYPES
   14: /************************************************************************/
   15:
   16: type ErrorOption = func(err *stageError) *stageError
   17:
   18: type stageError struct {
   19: 	err       error
   20: 	errorType sparkv1.ErrorType
   21: 	errorCode uint32
   22: 	metadata  map[string]any
   23: 	retry     *RetryConfig
   24: }
   25:
   26: type RetryConfig struct {
   27: 	times         uint
   28: 	backoffMillis uint
   29: }
   30:
   31: /************************************************************************/
   32: // ERRORS
   33: /************************************************************************/
   34:
   35: var (
   36: 	ErrStageDoesNotExist        = errors.New("stage does not exists")
   37: 	ErrBindValueFailed          = errors.New("bind value failed")
   38: 	ErrVariableNotFound         = errors.New("variable not found")
   39: 	ErrStageNotFoundInNodeChain = errors.New("stage not found in the node chain")
   40: 	ErrConditionalStageSkipped  = errors.New("conditional stage execution")
   41: 	ErrChainIsNotValid          = errors.New("chain is not valid")
   42: 	ErrInputVariableNotFound    = errors.New("input variable not found")
   43:
   44: 	errorTypeToStageStatusMapper = map[sparkv1.ErrorType]sparkv1.StageStatus{
   45: 		sparkv1.ErrorType_ERROR_TYPE_RETRY:              sparkv1.StageStatus_STAGE_FAILED,
   46: 		sparkv1.ErrorType_ERROR_TYPE_SKIP:               sparkv1.StageStatus_STAGE_SKIPPED,
   47: 		sparkv1.ErrorType_ERROR_TYPE_CANCELLED:          sparkv1.StageStatus_STAGE_CANCELED,
   48: 		sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED: sparkv1.StageStatus_STAGE_FAILED,
   49: 	}
   50: )
   51:
   52: /************************************************************************/
   53: // ERROR FACTORIES
   54: /************************************************************************/
   55:
X  56: func newErrStageNotFoundInNodeChain(stage string) error {
X  57: 	return fmt.Errorf("%w: %s", ErrStageNotFoundInNodeChain, stage)
X  58: }
   59:
X  60: func newErrConditionalStageSkipped(stageName string) error {
X  61: 	return fmt.Errorf("%w: stage '%s' skipped", ErrConditionalStageSkipped, stageName)
X  62: }
   63:
O  64: func NewStageError(err error, opts ...ErrorOption) StageError {
O  65: 	stg := &stageError{err: err}
O  66: 	for _, opt := range opts {
O  67: 		stg = opt(stg)
O  68: 	}
O  69: 	return stg
   70: }
   71:
   72: /************************************************************************/
   73: // STAGE ERROR ENVELOPE
   74: /************************************************************************/
   75:
O  76: func (s *stageError) ErrorType() sparkv1.ErrorType {
O  77: 	return s.errorType
O  78: }
   79:
O  80: func (s *stageError) Code() uint32 {
O  81: 	return s.errorCode
O  82: }
   83:
O  84: func (s *stageError) Error() string {
O  85: 	return s.err.Error()
O  86: }
   87:
O  88: func (s *stageError) Metadata() map[string]any {
O  89: 	return s.metadata
O  90: }
   91:
O  92: func (s *stageError) ToErrorMessage() *sparkv1.Error {
O  93: 	err := &sparkv1.Error{
O  94: 		Error:     s.err.Error(),
O  95: 		ErrorCode: s.errorCode,
O  96: 		ErrorType: s.errorType,
O  97: 	}
O  98: 	if s.metadata != nil {
O  99: 		err.Metadata, _ = structpb.NewValue(s.metadata)
O 100: 	}
X 101: 	if s.retry != nil {
X 102: 		err.Retry = &sparkv1.RetryStrategy{Backoff: uint32(s.retry.backoffMillis), Count: uint32(s.retry.times)}
X 103: 	}
O 104: 	return err
  105: }
  106:
  107: /************************************************************************/
  108: // STAGE ERROR OPTIONS
  109: /************************************************************************/
  110:
X 111: func WithErrorCode(code uint32) ErrorOption {
X 112: 	return func(err *stageError) *stageError {
X 113: 		err.errorCode = code
X 114: 		return err
X 115: 	}
  116: }
  117:
X 118: func WithMetadata(metadata any) ErrorOption {
X 119: 	return func(err *stageError) *stageError {
X 120: 		err.parseMetadata(metadata)
X 121: 		return err
X 122: 	}
  123: }
  124:
X 125: func WithRetry(times uint, backoffMillis time.Duration) ErrorOption {
X 126: 	return func(err *stageError) *stageError {
X 127: 		err.retry = &RetryConfig{times, uint(backoffMillis.Milliseconds())}
X 128: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_RETRY
X 129: 		return err
X 130: 	}
  131: }
  132:
O 133: func WithSkip() ErrorOption {
O 134: 	return func(err *stageError) *stageError {
O 135: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_SKIP
O 136: 		return err
O 137: 	}
  138: }
  139:
O 140: func WithCancel() ErrorOption {
O 141: 	return func(err *stageError) *stageError {
O 142: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_CANCELLED
O 143: 		err.metadata = map[string]any{"reason": "canceled in stage"}
O 144: 		return err
O 145: 	}
  146: }
  147:
O 148: func WithFatal() ErrorOption {
O 149: 	return func(err *stageError) *stageError {
O 150: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_FATAL
O 151: 		return err
O 152: 	}
  153: }
  154:
O 155: func withErrorType(errorType sparkv1.ErrorType) ErrorOption {
O 156: 	return func(err *stageError) *stageError {
O 157: 		err.errorType = errorType
O 158: 		return err
O 159: 	}
  160: }
  161:
X 162: func (s *stageError) parseMetadata(metadata any) {
X 163: 	m := map[string]any{}
X 164: 	if metadata != nil {
X 165: 		mdBytes, _ := json.Marshal(metadata)
X 166: 		_ = json.Unmarshal(mdBytes, &m)
X 167: 	}
X 168: 	s.metadata = m
  169: }
  170:
  171: /************************************************************************/
  172: // HELPERS
  173: /************************************************************************/
  174:
O 175: func errorTypeToStageStatus(errType sparkv1.ErrorType) sparkv1.StageStatus {
O 176: 	if err, ok := errorTypeToStageStatusMapper[errType]; ok {
O 177: 		return err
O 178: 	}
O 179: 	return sparkv1.StageStatus_STAGE_FAILED
  180: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/executor.go
    1: package spark_v1
    2:
    3: import sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    4:
    5: const (
    6: 	stageLogField  = "stage"
    7: 	jobKeyLogField = "job_key"
    8: )
    9:
O  10: func (c *chain) execute(ctx SparkContext) StageError {
O  11: 	n, err := c.getNodeToResume(ctx.LastActiveStage())
X  12: 	if err != nil {
X  13: 		return NewStageError(err)
X  14: 	}
O  15: 	return c.runner(ctx, n)
   16: }
   17:
   18: //nolint:cyclop
O  19: func (c *chain) runner(ctx SparkContext, node *node) StageError {
O  20: 	stages := getStagesToResume(node, ctx.LastActiveStage())
O  21: 	for _, stg := range stages {
O  22: 		select {
X  23: 		case <-ctx.Ctx().Done():
X  24: 			return nil
O  25: 		default:
O  26: 			ctx.Log().AddFields(stageLogField, stg.name).AddFields(jobKeyLogField, ctx.JobKey())
O  27:
X  28: 			if err := stg.ApplyConditionalExecutionOptions(ctx, stg.name); err != nil {
X  29: 				er := updateStage(ctx, stg.name, withStageError(err))
X  30: 				if er != nil {
X  31: 					ctx.Log().Error(er, "error updating stage status to 'started'")
X  32: 					return NewStageError(er)
X  33: 				}
X  34: 				continue
   35: 			}
   36:
O  37: 			er := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED))
O  38:
X  39: 			if er != nil {
X  40: 				ctx.Log().Error(er, "error updating stage status to 'started'")
X  41: 				return NewStageError(er)
X  42: 			}
   43:
O  44: 			var result any
O  45: 			var stageErr StageError
O  46:
O  47: 			// stage execution is delegated in which case call the delegate
O  48: 			// instead and expect that it will invoke the stage and return a result, error
O  49: 			if ctx.delegateStage() != nil {
O  50: 				result, stageErr = ctx.delegateStage()(NewStageContext(ctx, stg.name), stg.cb)
O  51: 			} else {
O  52: 				result, stageErr = stg.cb(NewStageContext(ctx, stg.name))
O  53: 			}
   54:
O  55: 			if err := c.handleStageError(ctx, node, stg, stageErr); err != nil {
O  56: 				if err.ErrorType() == sparkv1.ErrorType_ERROR_TYPE_SKIP {
O  57: 					continue
   58: 				}
O  59: 				return err
   60: 			}
   61:
X  62: 			if err := storeStageResult(ctx, stg, result); err != nil {
X  63: 				return err
X  64: 			}
   65:
X  66: 			if err := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_COMPLETED)); err != nil {
X  67: 				ctx.Log().Error(err, "error setting the stage status to 'completed'")
X  68: 				return NewStageError(err)
X  69: 			}
   70: 		}
   71: 	}
   72:
O  73: 	select {
X  74: 	case <-ctx.Ctx().Done():
X  75: 		return nil
O  76: 	default:
O  77: 		if node.complete != nil {
X  78: 			if er := updateStage(ctx, node.complete.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED)); er != nil {
X  79: 				ctx.Log().Error(er, "error setting the completed stage status to 'started'")
X  80: 				return NewStageError(er)
X  81: 			}
   82:
O  83: 			var stageErr StageError
O  84:
O  85: 			if ctx.delegateComplete() != nil {
O  86: 				stageErr = ctx.delegateComplete()(NewCompleteContext(ctx, node.complete.name), node.complete.cb)
O  87: 			} else {
O  88: 				stageErr = node.complete.cb(NewCompleteContext(ctx, node.complete.name))
O  89: 			}
   90:
X  91: 			if e := updateStage(ctx, node.complete.name, withStageStatusOrError(sparkv1.StageStatus_STAGE_COMPLETED, stageErr)); e != nil {
X  92: 				ctx.Log().Error(e, "error setting the completed stage status to 'completed'")
X  93: 				return NewStageError(e)
X  94: 			}
O  95: 			return stageErr
   96: 		}
   97: 	}
   98:
X  99: 	return nil
  100: }
  101:
  102: //nolint:cyclop
O 103: func (c *chain) handleStageError(ctx SparkContext, node *node, stg *stage, err StageError) StageError {
O 104: 	if err == nil {
O 105: 		return nil
O 106: 	}
  107:
X 108: 	if e := updateStage(ctx, stg.name, withStageError(err)); e != nil {
X 109: 		ctx.Log().Error(err, "error updating stage status")
X 110: 		return NewStageError(e)
X 111: 	}
  112:
O 113: 	switch err.ErrorType() {
O 114: 	case sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED:
O 115: 		if node.compensate != nil {
O 116: 			e := c.runner(ctx.WithoutLastActiveStage(), node.compensate)
X 117: 			if e != nil {
X 118: 				return e
X 119: 			}
  120: 		}
O 121: 		return err
O 122: 	case sparkv1.ErrorType_ERROR_TYPE_CANCELLED:
O 123: 		if node.cancel != nil {
O 124: 			e := c.runner(ctx.WithoutLastActiveStage(), node.cancel)
X 125: 			if e != nil {
X 126: 				return e
X 127: 			}
  128: 		}
O 129: 		return err
X 130: 	case sparkv1.ErrorType_ERROR_TYPE_RETRY:
X 131: 		return err
O 132: 	case sparkv1.ErrorType_ERROR_TYPE_SKIP:
O 133: 		return err
O 134: 	case sparkv1.ErrorType_ERROR_TYPE_FATAL:
O 135: 		fallthrough
O 136: 	default:
O 137: 		ctx.Log().Error(err, "unsupported error type returned from stage '%s'", stg.name)
O 138: 		return NewStageError(err, withErrorType(sparkv1.ErrorType_ERROR_TYPE_FATAL))
  139: 	}
  140: }
  141:
O 142: func storeStageResult(ctx SparkContext, stg *stage, result any) StageError {
O 143: 	if result != nil { //nolint:nestif
O 144: 		req, err := newSetStageResultReq(ctx.JobKey(), stg.name, result)
X 145: 		if err != nil {
X 146: 			ctx.Log().Error(err, "error creating set stage status request")
X 147: 			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
X 148: 				ctx.Log().Error(err, "error updating stage status")
X 149: 				return NewStageError(e)
X 150: 			}
X 151: 			return NewStageError(err)
  152: 		}
X 153: 		if err := ctx.StageProgressHandler().SetResult(req); err != nil {
X 154: 			ctx.Log().Error(err, "error on set stage status request")
X 155: 			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
X 156: 				ctx.Log().Error(err, "error updating stage status")
X 157: 				return NewStageError(e)
X 158: 			}
X 159: 			return NewStageError(err)
  160: 		}
  161: 	}
O 162: 	return nil
  163: }
  164:
  165: type updateStageOption = func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest
  166:
O 167: func withStageStatusOrError(status sparkv1.StageStatus, err StageError) updateStageOption {
O 168: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 169: 		if err != nil {
X 170: 			return withStageError(err)(stage)
X 171: 		}
O 172: 		return withStageStatus(status)(stage)
  173: 	}
  174: }
  175:
O 176: func withStageStatus(status sparkv1.StageStatus) updateStageOption {
O 177: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
O 178: 		stage.Status = status
O 179: 		return stage
O 180: 	}
  181: }
  182:
O 183: func withStageError(err StageError) updateStageOption {
O 184: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 185: 		if err == nil {
X 186: 			return stage
X 187: 		}
O 188: 		stage.Status = errorTypeToStageStatus(err.ErrorType())
O 189: 		stage.Err = err.ToErrorMessage()
O 190: 		return stage
  191: 	}
  192: }
  193:
X 194: func withError(err error) updateStageOption {
X 195: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 196: 		if err == nil {
X 197: 			return stage
X 198: 		}
X 199: 		stage.Status = sparkv1.StageStatus_STAGE_FAILED
X 200: 		stage.Err = NewStageError(err).ToErrorMessage()
X 201: 		return stage
  202: 	}
  203: }
  204:
O 205: func updateStage(ctx SparkContext, name string, opts ...updateStageOption) error {
O 206: 	req := &sparkv1.SetStageStatusRequest{Key: ctx.JobKey(), Name: name}
O 207: 	for _, opt := range opts {
O 208: 		req = opt(req)
O 209: 	}
O 210: 	return ctx.StageProgressHandler().Set(req)
  211: }
  212:
O 213: func getStagesToResume(n *node, lastActiveStage *sparkv1.LastActiveStage) []*stage {
O 214: 	if lastActiveStage == nil {
O 215: 		return n.stages
O 216: 	}
X 217: 	var stages []*stage
X 218: 	for idx, stg := range n.stages {
X 219: 		if stg.name == lastActiveStage.Name {
X 220: 			stages = append(stages, n.stages[idx:]...)
X 221: 		}
  222: 	}
X 223: 	return stages
  224: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/extensions.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: )
    7:
    8: /************************************************************************/
    9: // FACTORIES
   10: /************************************************************************/
   11:
O  12: func newSetStageResultReq(jobKey, name string, data interface{}) (*sparkv1.SetStageResultRequest, error) {
O  13: 	b, err := MarshalBinary(data)
O  14:
O  15: 	return &sparkv1.SetStageResultRequest{
O  16: 		Key:  jobKey,
O  17: 		Name: name,
O  18: 		Data: b,
O  19: 	}, err
O  20: }
   21:
O  22: func newVariable(name, mimeType string, value interface{}) (*sparkv1.Variable, error) {
O  23: 	pbValue, err := MarshalBinary(value)
X  24: 	if err != nil {
X  25: 		return nil, fmt.Errorf("error creating variable named '%s': %w", name, err)
X  26: 	}
O  27: 	return &sparkv1.Variable{
O  28: 		Data:     pbValue,
O  29: 		MimeType: mimeType,
O  30: 	}, nil
   31: }
   32:
X  33: func newStageResultReq(jobKey, stageName string) *sparkv1.GetStageResultRequest {
X  34: 	return &sparkv1.GetStageResultRequest{
X  35: 		Name: stageName,
X  36: 		Key:  jobKey,
X  37: 	}
X  38: }
   39:
X  40: func newSetStageStatusReq(jobKey, stageName string, status sparkv1.StageStatus, err ...*sparkv1.Error) *sparkv1.SetStageStatusRequest {
X  41: 	sssr := &sparkv1.SetStageStatusRequest{
X  42: 		Name:   stageName,
X  43: 		Key:    jobKey,
X  44: 		Status: status,
X  45: 	}
X  46: 	if len(err) > 0 {
X  47: 		sssr.Err = err[0]
X  48: 	}
X  49: 	return sssr
   50: }
   51:
X  52: func newGetVariablesRequest(jobKey string, names ...string) *sparkv1.GetInputsRequest {
X  53: 	vr := &sparkv1.GetInputsRequest{
X  54: 		Key: jobKey,
X  55: 	}
X  56: 	vr.Names = append(vr.Names, names...)
X  57: 	return vr
X  58: }
   59:
X  60: func newSetVariablesRequest(jobKey string, variables ...*Var) (*sparkv1.SetOutputsRequest, error) {
X  61: 	m := map[string]*sparkv1.Variable{}
X  62: 	for _, v := range variables {
X  63: 		variable, err := newVariable(v.Name, v.MimeType, v.Value)
X  64: 		if err != nil {
X  65: 			return nil, err
X  66: 		}
X  67: 		m[v.Name] = variable
   68: 	}
X  69: 	return &sparkv1.SetOutputsRequest{Key: jobKey, Variables: m}, nil
   70: }
   71:
X  72: func newGetStageStatusReq(jobKey, stageName string) *sparkv1.GetStageStatusRequest {
X  73: 	return &sparkv1.GetStageStatusRequest{Key: jobKey, Name: stageName}
X  74: }
   75:
   76: /************************************************************************/
   77: // INPUT
   78: /************************************************************************/
   79:
   80: type input struct {
   81: 	variable *sparkv1.Variable
   82: 	err      error
   83: }
   84:
O  85: func newInput(variable *sparkv1.Variable, err error) *input {
O  86: 	return &input{variable: variable, err: err}
O  87: }
   88:
X  89: func (i *input) String() string {
X  90: 	return string(i.variable.Data)
X  91: }
   92:
O  93: func (i *input) Raw() ([]byte, error) {
X  94: 	if i.err != nil {
X  95: 		return nil, i.err
X  96: 	}
   97:
O  98: 	return ConvertBytes(i.variable.Data, i.variable.MimeType)
   99: }
  100:
O 101: func (i *input) Bind(a interface{}) error {
X 102: 	if i.err != nil {
X 103: 		return i.err
X 104: 	}
  105:
X 106: 	if err := UnmarshalBinaryTo(i.variable.Data, a, ""); err != nil {
X 107: 		return err
X 108: 	}
  109:
O 110: 	return nil
  111: }
  112:
  113: /************************************************************************/
  114: // BATCH INPUTS
  115: /************************************************************************/
  116:
  117: type inputs struct {
  118: 	vars map[string]*sparkv1.Variable
  119: 	err  error
  120: }
  121:
O 122: func newInputs(err error, vars map[string]*sparkv1.Variable) Inputs {
O 123: 	return &inputs{vars: vars, err: err}
O 124: }
  125:
O 126: func (v inputs) Get(name string) Bindable {
O 127: 	found, ok := v.vars[name]
O 128: 	if ok {
O 129: 		return newInput(found, v.err)
O 130: 	}
X 131: 	err := v.err
X 132: 	if err == nil {
X 133: 		err = ErrInputVariableNotFound
X 134: 	}
X 135: 	return newInput(nil, v.err)
  136: }
  137:
X 138: func (v inputs) Error() error {
X 139: 	return v.err
X 140: }
  141:
  142: /************************************************************************/
  143: // STAGE RESULT
  144: /************************************************************************/
  145:
  146: type result struct {
  147: 	result *sparkv1.GetStageResultResponse
  148: 	err    error
  149: }
  150:
O 151: func newResult(err error, r *sparkv1.GetStageResultResponse) Bindable {
O 152: 	return &result{
O 153: 		result: r,
O 154: 		err:    err,
O 155: 	}
O 156: }
  157:
O 158: func (r *result) Raw() ([]byte, error) {
X 159: 	if r.err != nil {
X 160: 		return nil, r.err
X 161: 	}
  162:
O 163: 	return ConvertBytes(r.result.Data, "")
  164: }
  165:
O 166: func (r *result) Bind(a interface{}) error {
X 167: 	if r.err != nil {
X 168: 		return r.err
X 169: 	}
  170:
O 171: 	return UnmarshalBinaryTo(r.result.Data, a, "")
  172: }
  173:
X 174: func (r *result) String() string {
X 175: 	return string(r.result.GetData())
X 176: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/helpers.go
   1: package spark_v1
   2:
   3: import (
   4: 	"errors"
   5: )
   6:
O  7: var CompleteSuccess = func(ctx CompleteContext) StageError {
O  8: 	return nil
O  9: }
  10:
X 11: var CompleteError = func(ctx CompleteContext) StageError {
X 12: 	return NewStageError(errors.New("complete failed"))
X 13: }
  14:
// appendIfNotNil appends only the non-nil pointers from items onto array
// and returns the extended slice.
func appendIfNotNil[T any](array []*T, items ...*T) []*T {
	result := array
	for _, candidate := range items {
		if candidate == nil {
			continue
		}
		result = append(result, candidate)
	}
	return result
}
  23:
O 24: func addBreadcrumb(nodes ...*node) {
O 25: 	var nextNodes []*node
O 26: 	for _, n := range nodes {
O 27: 		n.cancel.appendBreadcrumb(cancelNodeType, n.breadcrumb)
O 28: 		n.compensate.appendBreadcrumb(compensateNodeType, n.breadcrumb)
O 29: 		nextNodes = appendIfNotNil(nextNodes, n.compensate, n.cancel)
O 30: 	}
O 31: 	if len(nextNodes) > 0 {
O 32: 		addBreadcrumb(nextNodes...)
O 33: 	}
  34: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type variableHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcIOHandler(client sparkv1.ManagerServiceClient) IOHandler {
X 13: 	return variableHandler{client}
X 14: }
  15:
X 16: func (g variableHandler) Inputs(jobKey string, names ...string) Inputs {
X 17: 	variables, err := g.client.GetInputs(context.Background(), newGetVariablesRequest(jobKey, names...))
X 18: 	if err != nil {
X 19: 		return newInputs(err, nil)
X 20: 	}
  21:
X 22: 	return newInputs(err, variables.Variables)
  23: }
  24:
X 25: func (g variableHandler) Input(jobKey, name string) Input {
X 26: 	return g.Inputs(jobKey, name).Get(name)
X 27: }
  28:
X 29: func (g variableHandler) Output(jobKey string, variables ...*Var) error {
X 30: 	request, err := newSetVariablesRequest(jobKey, variables...)
X 31: 	if err != nil {
X 32: 		return err
X 33: 	}
X 34: 	_, err = g.client.SetOutputs(context.Background(), request)
X 35: 	return err
  36: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_memory.go
   1: package spark_v1
   2:
   3: import (
   4: 	"fmt"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"testing"
   7: )
   8:
   9: type inMemoryIOHandler struct {
  10: 	variables map[string]*Var
  11: 	t         *testing.T
  12: }
  13:
O 14: func NewInMemoryIOHandler(t *testing.T) TestIOHandler {
O 15: 	i := &inMemoryIOHandler{t: t, variables: map[string]*Var{}}
O 16: 	return i
O 17: }
  18:
O 19: func (i *inMemoryIOHandler) Inputs(jobKey string, names ...string) Inputs {
O 20: 	var (
O 21: 		vars = map[string]*sparkv1.Variable{}
O 22: 		err  error
O 23: 	)
O 24: 	for _, n := range names {
O 25: 		key := i.key(jobKey, n)
O 26: 		if v, ok := i.variables[key]; ok {
O 27: 			var va *sparkv1.Variable
O 28: 			va, err = newVariable(v.Name, v.MimeType, v.Value)
O 29: 			vars[v.Name] = va
O 30: 		}
  31: 	}
X 32: 	if len(vars) == 0 {
X 33: 		i.t.Fatal("no variables found for the params: ")
X 34: 	}
O 35: 	return newInputs(err, vars)
  36: }
  37:
O 38: func (i *inMemoryIOHandler) Input(jobKey, name string) Input {
O 39: 	inputs := i.Inputs(jobKey, name)
O 40: 	return inputs.Get(name)
O 41: }
  42:
X 43: func (i *inMemoryIOHandler) Output(jobKey string, variables ...*Var) error {
X 44: 	for _, v := range variables {
X 45: 		i.variables[i.key(jobKey, v.Name)] = v
X 46: 	}
X 47: 	return nil
  48: }
  49:
O 50: func (i *inMemoryIOHandler) SetVar(jobKey string, v *Var) {
O 51: 	i.variables[i.key(jobKey, v.Name)] = v
O 52: }
  53:
O 54: func (i *inMemoryIOHandler) key(jobKey, name string) string {
O 55: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 56: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type stageProgressHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcStageProgressHandler(client sparkv1.ManagerServiceClient) StageProgressHandler {
X 13: 	return &stageProgressHandler{client: client}
X 14: }
  15:
X 16: func (g *stageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
X 17: 	resp, err := g.client.GetStageStatus(context.Background(), newGetStageStatusReq(jobKey, name))
X 18: 	return &resp.Status, err
X 19: }
  20:
X 21: func (g *stageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X 22: 	_, err := g.client.SetStageStatus(context.Background(), stageStatus)
X 23: 	return err
X 24: }
  25:
X 26: func (g *stageProgressHandler) GetResult(jobKey, name string) Bindable {
X 27: 	result, err := g.client.GetStageResult(context.Background(), newStageResultReq(jobKey, name))
X 28: 	if err != nil {
X 29: 		return newResult(err, nil)
X 30: 	}
X 31: 	return newResult(nil, result)
  32: }
  33:
X 34: func (g *stageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X 35: 	_, err := g.client.SetStageResult(context.Background(), result)
X 36: 	return err
X 37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_memory.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: 	"testing"
    7:
    8: 	"github.com/stretchr/testify/assert"
    9: )
   10:
   11: type InMemoryStageProgressHandler struct {
   12: 	t                  *testing.T
   13: 	stages             map[string]*sparkv1.SetStageStatusRequest
   14: 	results            map[string]*sparkv1.SetStageResultRequest
   15: 	behaviourSet       map[string]StageBehaviourParams
   16: 	behaviourSetResult map[string]ResultBehaviourParams
   17: }
   18:
O  19: func NewInMemoryStageProgressHandler(t *testing.T, seeds ...any) TestStageProgressHandler {
O  20: 	handler := InMemoryStageProgressHandler{t,
O  21: 		map[string]*sparkv1.SetStageStatusRequest{}, map[string]*sparkv1.SetStageResultRequest{},
O  22: 		map[string]StageBehaviourParams{}, map[string]ResultBehaviourParams{}}
O  23:
O  24: 	return &handler
O  25: }
   26:
O  27: func (i *InMemoryStageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
O  28: 	if stage, ok := i.stages[i.key(jobKey, name)]; ok {
O  29: 		return &stage.Status, nil
O  30: 	}
X  31: 	i.t.Fatalf("stage status no found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  32: 	return nil, nil
   33: }
   34:
O  35: func (i *InMemoryStageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X  36: 	if bp, ok := i.behaviourSet[stageStatus.Name]; ok {
X  37: 		if bp.status == stageStatus.Status && bp.err != nil {
X  38: 			return bp.err
X  39: 		}
   40: 	}
O  41: 	i.stages[i.key(stageStatus.Key, stageStatus.Name)] = stageStatus
O  42: 	return nil
   43: }
   44:
O  45: func (i *InMemoryStageProgressHandler) GetResult(jobKey, name string) Bindable {
O  46: 	if variable, ok := i.results[i.key(jobKey, name)]; ok {
O  47: 		return newResult(nil, &sparkv1.GetStageResultResponse{
O  48: 			Data: variable.Data,
O  49: 		})
O  50: 	}
X  51: 	i.t.Fatalf("stage result not found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  52: 	return nil
   53: }
   54:
O  55: func (i *InMemoryStageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X  56: 	if br, ok := i.behaviourSetResult[result.Name]; ok {
X  57: 		if br.jobKey == result.GetKey() && br.name == result.Name && br.err != nil {
X  58: 			return br.err
X  59: 		}
   60: 	}
O  61: 	i.results[i.key(result.GetKey(), result.Name)] = result
O  62: 	return nil
   63: }
   64:
X  65: func (i *InMemoryStageProgressHandler) AddBehaviour() *Behaviour {
X  66: 	return &Behaviour{i: i}
X  67: }
   68:
X  69: func (i *InMemoryStageProgressHandler) ResetBehaviour() {
X  70: 	i.behaviourSet = map[string]StageBehaviourParams{}
X  71: }
   72:
O  73: func (i *InMemoryStageProgressHandler) AssertStageCompleted(jobKey, stageName string) {
O  74: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_COMPLETED)
O  75: }
   76:
X  77: func (i *InMemoryStageProgressHandler) AssertStageStarted(jobKey, stageName string) {
X  78: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_STARTED)
X  79: }
   80:
O  81: func (i *InMemoryStageProgressHandler) AssertStageSkipped(jobKey, stageName string) {
O  82: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_SKIPPED)
O  83: }
   84:
O  85: func (i *InMemoryStageProgressHandler) AssertStageCancelled(jobKey, stageName string) {
O  86: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_CANCELED)
O  87: }
   88:
O  89: func (i *InMemoryStageProgressHandler) AssertStageFailed(jobKey, stageName string) {
O  90: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_FAILED)
O  91: }
   92:
X  93: func (i *InMemoryStageProgressHandler) AssertStageResult(jobKey, stageName string, expectedStageResult any) {
X  94: 	r := i.GetResult(jobKey, stageName)
X  95: 	resB, err := r.Raw()
X  96: 	if err != nil {
X  97: 		i.t.Error(err)
X  98: 		return
X  99: 	}
X 100: 	req, err := newSetStageResultReq(jobKey, MimeTypeJSON, expectedStageResult)
X 101: 	if err != nil {
X 102: 		i.t.Error(err)
X 103: 		return
X 104: 	}
X 105: 	assert.Equal(i.t, req.Data, resB)
  106: }
  107:
O 108: func (i *InMemoryStageProgressHandler) key(jobKey, name string) string {
O 109: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 110: }
  111:
O 112: func (i *InMemoryStageProgressHandler) assertStageStatus(jobKey, stageName string, expectedStatus sparkv1.StageStatus) {
O 113: 	status, err := i.Get(jobKey, stageName)
X 114: 	if err != nil {
X 115: 		i.t.Error(err)
X 116: 		return
X 117: 	}
O 118: 	assert.Equal(i.t, &expectedStatus, status, "spark status expected: '%s' got: '%s'", expectedStatus, status)
  119: }
  120:
  121: type Behaviour struct {
  122: 	i *InMemoryStageProgressHandler
  123: }
  124:
X 125: func (b *Behaviour) Set(stageName string, status sparkv1.StageStatus, err error) *InMemoryStageProgressHandler {
X 126: 	b.i.behaviourSet[stageName] = StageBehaviourParams{err: err, status: status}
X 127: 	return b.i
X 128: }
  129:
X 130: func (b *Behaviour) SetResult(jobKey, stageName string, err error) *InMemoryStageProgressHandler {
X 131: 	b.i.behaviourSetResult[stageName] = ResultBehaviourParams{jobKey: jobKey, name: stageName, err: err}
X 132: 	return b.i
X 133: }
  134:
  135: type StageBehaviourParams struct {
  136: 	err    error
  137: 	status sparkv1.StageStatus
  138: }
  139:
  140: type ResultBehaviourParams struct {
  141: 	jobKey string
  142: 	name   string
  143: 	err    error
  144: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/server.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"net"
   7: 	"time"
   8:
   9: 	"google.golang.org/grpc"
  10: 	"google.golang.org/grpc/reflection"
  11: )
  12:
  13: /************************************************************************/
  14: // CONSTANTS
  15: /************************************************************************/
  16:
  17: var connectionTimeout = time.Second * 10
  18:
  19: /************************************************************************/
  20: // TYPES
  21: /************************************************************************/
  22:
  23: type server struct {
  24: 	config *config
  25: 	worker Worker
  26: 	svr    *grpc.Server
  27: }
  28:
  29: /************************************************************************/
  30: // SERVER
  31: /************************************************************************/
  32:
X 33: func newServer(cfg *config, worker Worker) *server {
X 34: 	return &server{config: cfg, worker: worker}
X 35: }
  36:
X 37: func (s *server) start() error {
X 38: 	// LOGGER SAMPLE >> add .Fields(fields) with the spark name on it
X 39: 	log := NewLogger()
X 40:
X 41: 	// nosemgrep
X 42: 	s.svr = grpc.NewServer(grpc.ConnectionTimeout(connectionTimeout))
X 43: 	sparkv1.RegisterAgentServiceServer(s.svr, s)
X 44:
X 45: 	reflection.Register(s.svr)
X 46:
X 47: 	listener, err := net.Listen("tcp", s.config.serverAddress())
X 48: 	if err != nil {
X 49: 		log.Error(err, "error setting up the listener")
X 50: 		return err
X 51: 	}
  52:
  53: 	// nosemgrep
X 54: 	if err = s.svr.Serve(listener); err != nil {
X 55: 		log.Error(err, "error starting the server")
X 56: 		return err
X 57: 	}
X 58: 	return nil
  59: }
  60:
X 61: func (s *server) stop() {
X 62: 	if s.svr != nil {
X 63: 		s.svr.GracefulStop()
X 64: 	}
  65: }
  66:
  67: /************************************************************************/
  68: // RPC IMPLEMENTATIONS
  69: /************************************************************************/
  70:
X 71: func (s *server) ExecuteJob(ctx context.Context, request *sparkv1.ExecuteJobRequest) (*sparkv1.Void, error) {
X 72: 	jobContext := NewSparkMetadata(ctx, request.Key, request.CorrelationId, request.TransactionId, nil)
X 73: 	go func() { // TODO goroutine pool
X 74: 		_ = s.worker.Execute(jobContext)
X 75: 	}()
X 76: 	return &sparkv1.Void{}, nil
  77: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/variable.go
   1: package spark_v1
   2:
   3: type Var struct {
   4: 	Name     string
   5: 	MimeType string
   6: 	Value    interface{}
   7: }
   8:
O  9: func NewVar(name, mimeType string, value interface{}) *Var {
O 10: 	return &Var{name, mimeType, value}
O 11: }

@waeljammal
Copy link
Collaborator Author

go-cover-view

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/codec.go
   1: package spark_v1
   2:
   3: import (
   4: 	"encoding/json"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
O  8: func MarshalBinary(data interface{}) ([]byte, error) {
O  9: 	return json.Marshal(data)
O 10: }
  11:
O 12: func UnmarshalBinaryTo(data []byte, out interface{}, mimeType string) error {
O 13: 	if mimeType == "" {
O 14: 		return sparkv1.SerdesMap[MimeTypeJSON].Unmarshal(data, &out)
O 15: 	} else {
O 16: 		return sparkv1.SerdesMap[mimeType].Unmarshal(data, &out)
O 17: 	}
  18: }
  19:
O 20: func ConvertBytes(data []byte, mimeType string) (out []byte, err error) {
O 21: 	var value interface{}
O 22: 	err = UnmarshalBinaryTo(data, &value, mimeType)
X 23: 	if err != nil {
X 24: 		return
X 25: 	}
  26:
O 27: 	switch v := value.(type) {
O 28: 	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
O 29: 		return data, nil
O 30: 	case string:
O 31: 		return []byte(v), nil
X 32: 	default:
X 33: 		err = UnmarshalBinaryTo(data, &out, mimeType)
  34: 	}
  35:
X 36: 	return
  37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/errors.go
    1: package spark_v1
    2:
    3: import (
    4: 	"encoding/json"
    5: 	"errors"
    6: 	"fmt"
    7: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    8: 	"google.golang.org/protobuf/types/known/structpb"
    9: 	"time"
   10: )
   11:
   12: /************************************************************************/
   13: // TYPES
   14: /************************************************************************/
   15:
   16: type ErrorOption = func(err *stageError) *stageError
   17:
   18: type stageError struct {
   19: 	err       error
   20: 	errorType sparkv1.ErrorType
   21: 	errorCode uint32
   22: 	metadata  map[string]any
   23: 	retry     *RetryConfig
   24: }
   25:
   26: type RetryConfig struct {
   27: 	times         uint
   28: 	backoffMillis uint
   29: }
   30:
   31: /************************************************************************/
   32: // ERRORS
   33: /************************************************************************/
   34:
   35: var (
   36: 	ErrStageDoesNotExist        = errors.New("stage does not exists")
   37: 	ErrBindValueFailed          = errors.New("bind value failed")
   38: 	ErrVariableNotFound         = errors.New("variable not found")
   39: 	ErrStageNotFoundInNodeChain = errors.New("stage not found in the node chain")
   40: 	ErrConditionalStageSkipped  = errors.New("conditional stage execution")
   41: 	ErrChainIsNotValid          = errors.New("chain is not valid")
   42: 	ErrInputVariableNotFound    = errors.New("input variable not found")
   43:
   44: 	errorTypeToStageStatusMapper = map[sparkv1.ErrorType]sparkv1.StageStatus{
   45: 		sparkv1.ErrorType_ERROR_TYPE_RETRY:              sparkv1.StageStatus_STAGE_FAILED,
   46: 		sparkv1.ErrorType_ERROR_TYPE_SKIP:               sparkv1.StageStatus_STAGE_SKIPPED,
   47: 		sparkv1.ErrorType_ERROR_TYPE_CANCELLED:          sparkv1.StageStatus_STAGE_CANCELED,
   48: 		sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED: sparkv1.StageStatus_STAGE_FAILED,
   49: 	}
   50: )
   51:
   52: /************************************************************************/
   53: // ERROR FACTORIES
   54: /************************************************************************/
   55:
X  56: func newErrStageNotFoundInNodeChain(stage string) error {
X  57: 	return fmt.Errorf("%w: %s", ErrStageNotFoundInNodeChain, stage)
X  58: }
   59:
X  60: func newErrConditionalStageSkipped(stageName string) error {
X  61: 	return fmt.Errorf("%w: stage '%s' skipped", ErrConditionalStageSkipped, stageName)
X  62: }
   63:
O  64: func NewStageError(err error, opts ...ErrorOption) StageError {
O  65: 	stg := &stageError{err: err}
O  66: 	for _, opt := range opts {
O  67: 		stg = opt(stg)
O  68: 	}
O  69: 	return stg
   70: }
   71:
   72: /************************************************************************/
   73: // STAGE ERROR ENVELOPE
   74: /************************************************************************/
   75:
O  76: func (s *stageError) ErrorType() sparkv1.ErrorType {
O  77: 	return s.errorType
O  78: }
   79:
O  80: func (s *stageError) Code() uint32 {
O  81: 	return s.errorCode
O  82: }
   83:
O  84: func (s *stageError) Error() string {
O  85: 	return s.err.Error()
O  86: }
   87:
O  88: func (s *stageError) Metadata() map[string]any {
O  89: 	return s.metadata
O  90: }
   91:
O  92: func (s *stageError) ToErrorMessage() *sparkv1.Error {
O  93: 	err := &sparkv1.Error{
O  94: 		Error:     s.err.Error(),
O  95: 		ErrorCode: s.errorCode,
O  96: 		ErrorType: s.errorType,
O  97: 	}
O  98: 	if s.metadata != nil {
O  99: 		err.Metadata, _ = structpb.NewValue(s.metadata)
O 100: 	}
X 101: 	if s.retry != nil {
X 102: 		err.Retry = &sparkv1.RetryStrategy{Backoff: uint32(s.retry.backoffMillis), Count: uint32(s.retry.times)}
X 103: 	}
O 104: 	return err
  105: }
  106:
  107: /************************************************************************/
  108: // STAGE ERROR OPTIONS
  109: /************************************************************************/
  110:
X 111: func WithErrorCode(code uint32) ErrorOption {
X 112: 	return func(err *stageError) *stageError {
X 113: 		err.errorCode = code
X 114: 		return err
X 115: 	}
  116: }
  117:
X 118: func WithMetadata(metadata any) ErrorOption {
X 119: 	return func(err *stageError) *stageError {
X 120: 		err.parseMetadata(metadata)
X 121: 		return err
X 122: 	}
  123: }
  124:
X 125: func WithRetry(times uint, backoffMillis time.Duration) ErrorOption {
X 126: 	return func(err *stageError) *stageError {
X 127: 		err.retry = &RetryConfig{times, uint(backoffMillis.Milliseconds())}
X 128: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_RETRY
X 129: 		return err
X 130: 	}
  131: }
  132:
O 133: func WithSkip() ErrorOption {
O 134: 	return func(err *stageError) *stageError {
O 135: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_SKIP
O 136: 		return err
O 137: 	}
  138: }
  139:
O 140: func WithCancel() ErrorOption {
O 141: 	return func(err *stageError) *stageError {
O 142: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_CANCELLED
O 143: 		err.metadata = map[string]any{"reason": "canceled in stage"}
O 144: 		return err
O 145: 	}
  146: }
  147:
O 148: func WithFatal() ErrorOption {
O 149: 	return func(err *stageError) *stageError {
O 150: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_FATAL
O 151: 		return err
O 152: 	}
  153: }
  154:
O 155: func withErrorType(errorType sparkv1.ErrorType) ErrorOption {
O 156: 	return func(err *stageError) *stageError {
O 157: 		err.errorType = errorType
O 158: 		return err
O 159: 	}
  160: }
  161:
X 162: func (s *stageError) parseMetadata(metadata any) {
X 163: 	m := map[string]any{}
X 164: 	if metadata != nil {
X 165: 		mdBytes, _ := json.Marshal(metadata)
X 166: 		_ = json.Unmarshal(mdBytes, &m)
X 167: 	}
X 168: 	s.metadata = m
  169: }
  170:
  171: /************************************************************************/
  172: // HELPERS
  173: /************************************************************************/
  174:
O 175: func errorTypeToStageStatus(errType sparkv1.ErrorType) sparkv1.StageStatus {
O 176: 	if err, ok := errorTypeToStageStatusMapper[errType]; ok {
O 177: 		return err
O 178: 	}
O 179: 	return sparkv1.StageStatus_STAGE_FAILED
  180: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/executor.go
    1: package spark_v1
    2:
    3: import sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    4:
    5: const (
    6: 	stageLogField  = "stage"
    7: 	jobKeyLogField = "job_key"
    8: )
    9:
O  10: func (c *chain) execute(ctx SparkContext) StageError {
O  11: 	n, err := c.getNodeToResume(ctx.LastActiveStage())
X  12: 	if err != nil {
X  13: 		return NewStageError(err)
X  14: 	}
O  15: 	return c.runner(ctx, n)
   16: }
   17:
// runner walks the stages of a single node in order, honouring context
// cancellation, conditional skips and delegated execution. It persists the
// stage status at every transition via updateStage, and finally invokes the
// node's "complete" callback (if any), returning its error.
//
//nolint:cyclop
func (c *chain) runner(ctx SparkContext, node *node) StageError {
	stages := getStagesToResume(node, ctx.LastActiveStage())
	for _, stg := range stages {
		select {
		case <-ctx.Ctx().Done():
			// Job context cancelled: stop quietly; statuses already persisted.
			return nil
		default:
			ctx.Log().AddFields(stageLogField, stg.name).AddFields(jobKeyLogField, ctx.JobKey())

			// Conditional execution: a non-nil error means "skip this stage";
			// record it and move on to the next stage.
			if err := stg.ApplyConditionalExecutionOptions(ctx, stg.name); err != nil {
				er := updateStage(ctx, stg.name, withStageError(err))
				if er != nil {
					ctx.Log().Error(er, "error updating stage status to 'started'")
					return NewStageError(er)
				}
				continue
			}

			er := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED))

			if er != nil {
				ctx.Log().Error(er, "error updating stage status to 'started'")
				return NewStageError(er)
			}

			var result any
			var stageErr StageError

			// stage execution is delegated in which case call the delegate
			// instead and expect that it will invoke the stage and return a result, error
			if ctx.delegateStage() != nil {
				result, stageErr = ctx.delegateStage()(NewStageContext(ctx, stg.name), stg.cb)
			} else {
				result, stageErr = stg.cb(NewStageContext(ctx, stg.name))
			}

			// handleStageError may run compensate/cancel sub-chains; a SKIP
			// error type means "continue with the next stage".
			if err := c.handleStageError(ctx, node, stg, stageErr); err != nil {
				if err.ErrorType() == sparkv1.ErrorType_ERROR_TYPE_SKIP {
					continue
				}
				return err
			}

			if err := storeStageResult(ctx, stg, result); err != nil {
				return err
			}

			if err := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_COMPLETED)); err != nil {
				ctx.Log().Error(err, "error setting the stage status to 'completed'")
				return NewStageError(err)
			}
		}
	}

	select {
	case <-ctx.Ctx().Done():
		return nil
	default:
		// Run the node's completion callback (mirrors stage execution,
		// including optional delegation) once all stages have finished.
		if node.complete != nil {
			if er := updateStage(ctx, node.complete.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED)); er != nil {
				ctx.Log().Error(er, "error setting the completed stage status to 'started'")
				return NewStageError(er)
			}

			var stageErr StageError

			if ctx.delegateComplete() != nil {
				stageErr = ctx.delegateComplete()(NewCompleteContext(ctx, node.complete.name), node.complete.cb)
			} else {
				stageErr = node.complete.cb(NewCompleteContext(ctx, node.complete.name))
			}

			if e := updateStage(ctx, node.complete.name, withStageStatusOrError(sparkv1.StageStatus_STAGE_COMPLETED, stageErr)); e != nil {
				ctx.Log().Error(e, "error setting the completed stage status to 'completed'")
				return NewStageError(e)
			}
			return stageErr
		}
	}

	return nil
}
  101:
// handleStageError persists the stage failure and decides how the chain
// reacts: FAILED runs the compensate sub-chain, CANCELLED runs the cancel
// sub-chain, RETRY and SKIP bubble up untouched, and anything else
// (including FATAL) is escalated to a FATAL stage error.
//
//nolint:cyclop
func (c *chain) handleStageError(ctx SparkContext, node *node, stg *stage, err StageError) StageError {
	if err == nil {
		return nil
	}

	if e := updateStage(ctx, stg.name, withStageError(err)); e != nil {
		ctx.Log().Error(err, "error updating stage status")
		return NewStageError(e)
	}

	switch err.ErrorType() {
	case sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED:
		// Run the compensation sub-chain from its start before surfacing err.
		if node.compensate != nil {
			e := c.runner(ctx.WithoutLastActiveStage(), node.compensate)
			if e != nil {
				return e
			}
		}
		return err
	case sparkv1.ErrorType_ERROR_TYPE_CANCELLED:
		// Run the cancellation sub-chain from its start before surfacing err.
		if node.cancel != nil {
			e := c.runner(ctx.WithoutLastActiveStage(), node.cancel)
			if e != nil {
				return e
			}
		}
		return err
	case sparkv1.ErrorType_ERROR_TYPE_RETRY:
		return err
	case sparkv1.ErrorType_ERROR_TYPE_SKIP:
		return err
	case sparkv1.ErrorType_ERROR_TYPE_FATAL:
		fallthrough
	default:
		ctx.Log().Error(err, "unsupported error type returned from stage '%s'", stg.name)
		return NewStageError(err, withErrorType(sparkv1.ErrorType_ERROR_TYPE_FATAL))
	}
}
  141:
// storeStageResult serialises a non-nil stage result and persists it through
// the context's StageProgressHandler. On either a serialisation or a store
// failure the stage is flagged as failed (best effort) before the error is
// returned. A nil result is a no-op.
func storeStageResult(ctx SparkContext, stg *stage, result any) StageError {
	if result != nil { //nolint:nestif
		req, err := newSetStageResultReq(ctx.JobKey(), stg.name, result)
		if err != nil {
			ctx.Log().Error(err, "error creating set stage status request")
			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
				ctx.Log().Error(err, "error updating stage status")
				return NewStageError(e)
			}
			return NewStageError(err)
		}
		if err := ctx.StageProgressHandler().SetResult(req); err != nil {
			ctx.Log().Error(err, "error on set stage status request")
			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
				ctx.Log().Error(err, "error updating stage status")
				return NewStageError(e)
			}
			return NewStageError(err)
		}
	}
	return nil
}
  164:
  165: type updateStageOption = func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest
  166:
O 167: func withStageStatusOrError(status sparkv1.StageStatus, err StageError) updateStageOption {
O 168: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 169: 		if err != nil {
X 170: 			return withStageError(err)(stage)
X 171: 		}
O 172: 		return withStageStatus(status)(stage)
  173: 	}
  174: }
  175:
O 176: func withStageStatus(status sparkv1.StageStatus) updateStageOption {
O 177: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
O 178: 		stage.Status = status
O 179: 		return stage
O 180: 	}
  181: }
  182:
O 183: func withStageError(err StageError) updateStageOption {
O 184: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 185: 		if err == nil {
X 186: 			return stage
X 187: 		}
O 188: 		stage.Status = errorTypeToStageStatus(err.ErrorType())
O 189: 		stage.Err = err.ToErrorMessage()
O 190: 		return stage
  191: 	}
  192: }
  193:
X 194: func withError(err error) updateStageOption {
X 195: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 196: 		if err == nil {
X 197: 			return stage
X 198: 		}
X 199: 		stage.Status = sparkv1.StageStatus_STAGE_FAILED
X 200: 		stage.Err = NewStageError(err).ToErrorMessage()
X 201: 		return stage
  202: 	}
  203: }
  204:
O 205: func updateStage(ctx SparkContext, name string, opts ...updateStageOption) error {
O 206: 	req := &sparkv1.SetStageStatusRequest{Key: ctx.JobKey(), Name: name}
O 207: 	for _, opt := range opts {
O 208: 		req = opt(req)
O 209: 	}
O 210: 	return ctx.StageProgressHandler().Set(req)
  211: }
  212:
O 213: func getStagesToResume(n *node, lastActiveStage *sparkv1.LastActiveStage) []*stage {
O 214: 	if lastActiveStage == nil {
O 215: 		return n.stages
O 216: 	}
X 217: 	var stages []*stage
X 218: 	for idx, stg := range n.stages {
X 219: 		if stg.name == lastActiveStage.Name {
X 220: 			stages = append(stages, n.stages[idx:]...)
X 221: 		}
  222: 	}
X 223: 	return stages
  224: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/extensions.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: )
    7:
    8: /************************************************************************/
    9: // FACTORIES
   10: /************************************************************************/
   11:
O  12: func newSetStageResultReq(jobKey, name string, data interface{}) (*sparkv1.SetStageResultRequest, error) {
O  13: 	b, err := MarshalBinary(data)
O  14:
O  15: 	return &sparkv1.SetStageResultRequest{
O  16: 		Key:  jobKey,
O  17: 		Name: name,
O  18: 		Data: b,
O  19: 	}, err
O  20: }
   21:
O  22: func newVariable(name, mimeType string, value interface{}) (*sparkv1.Variable, error) {
O  23: 	pbValue, err := MarshalBinary(value)
X  24: 	if err != nil {
X  25: 		return nil, fmt.Errorf("error creating variable named '%s': %w", name, err)
X  26: 	}
O  27: 	return &sparkv1.Variable{
O  28: 		Data:     pbValue,
O  29: 		MimeType: mimeType,
O  30: 	}, nil
   31: }
   32:
X  33: func newStageResultReq(jobKey, stageName string) *sparkv1.GetStageResultRequest {
X  34: 	return &sparkv1.GetStageResultRequest{
X  35: 		Name: stageName,
X  36: 		Key:  jobKey,
X  37: 	}
X  38: }
   39:
X  40: func newSetStageStatusReq(jobKey, stageName string, status sparkv1.StageStatus, err ...*sparkv1.Error) *sparkv1.SetStageStatusRequest {
X  41: 	sssr := &sparkv1.SetStageStatusRequest{
X  42: 		Name:   stageName,
X  43: 		Key:    jobKey,
X  44: 		Status: status,
X  45: 	}
X  46: 	if len(err) > 0 {
X  47: 		sssr.Err = err[0]
X  48: 	}
X  49: 	return sssr
   50: }
   51:
X  52: func newGetVariablesRequest(jobKey string, names ...string) *sparkv1.GetInputsRequest {
X  53: 	vr := &sparkv1.GetInputsRequest{
X  54: 		Key: jobKey,
X  55: 	}
X  56: 	vr.Names = append(vr.Names, names...)
X  57: 	return vr
X  58: }
   59:
X  60: func newSetVariablesRequest(jobKey string, variables ...*Var) (*sparkv1.SetOutputsRequest, error) {
X  61: 	m := map[string]*sparkv1.Variable{}
X  62: 	for _, v := range variables {
X  63: 		variable, err := newVariable(v.Name, v.MimeType, v.Value)
X  64: 		if err != nil {
X  65: 			return nil, err
X  66: 		}
X  67: 		m[v.Name] = variable
   68: 	}
X  69: 	return &sparkv1.SetOutputsRequest{Key: jobKey, Variables: m}, nil
   70: }
   71:
X  72: func newGetStageStatusReq(jobKey, stageName string) *sparkv1.GetStageStatusRequest {
X  73: 	return &sparkv1.GetStageStatusRequest{Key: jobKey, Name: stageName}
X  74: }
   75:
   76: /************************************************************************/
   77: // INPUT
   78: /************************************************************************/
   79:
   80: type input struct {
   81: 	variable *sparkv1.Variable
   82: 	err      error
   83: }
   84:
O  85: func newInput(variable *sparkv1.Variable, err error) *input {
O  86: 	return &input{variable: variable, err: err}
O  87: }
   88:
X  89: func (i *input) String() string {
X  90: 	return string(i.variable.Data)
X  91: }
   92:
O  93: func (i *input) Raw() ([]byte, error) {
X  94: 	if i.err != nil {
X  95: 		return nil, i.err
X  96: 	}
   97:
O  98: 	return ConvertBytes(i.variable.Data, i.variable.MimeType)
   99: }
  100:
O 101: func (i *input) Bind(a interface{}) error {
X 102: 	if i.err != nil {
X 103: 		return i.err
X 104: 	}
  105:
X 106: 	if err := UnmarshalBinaryTo(i.variable.Data, a, ""); err != nil {
X 107: 		return err
X 108: 	}
  109:
O 110: 	return nil
  111: }
  112:
  113: /************************************************************************/
  114: // BATCH INPUTS
  115: /************************************************************************/
  116:
  117: type inputs struct {
  118: 	vars map[string]*sparkv1.Variable
  119: 	err  error
  120: }
  121:
O 122: func newInputs(err error, vars map[string]*sparkv1.Variable) Inputs {
O 123: 	return &inputs{vars: vars, err: err}
O 124: }
  125:
O 126: func (v inputs) Get(name string) Bindable {
O 127: 	found, ok := v.vars[name]
O 128: 	if ok {
O 129: 		return newInput(found, v.err)
O 130: 	}
X 131: 	err := v.err
X 132: 	if err == nil {
X 133: 		err = ErrInputVariableNotFound
X 134: 	}
X 135: 	return newInput(nil, v.err)
  136: }
  137:
X 138: func (v inputs) Error() error {
X 139: 	return v.err
X 140: }
  141:
  142: /************************************************************************/
  143: // STAGE RESULT
  144: /************************************************************************/
  145:
  146: type result struct {
  147: 	result *sparkv1.GetStageResultResponse
  148: 	err    error
  149: }
  150:
O 151: func newResult(err error, r *sparkv1.GetStageResultResponse) Bindable {
O 152: 	return &result{
O 153: 		result: r,
O 154: 		err:    err,
O 155: 	}
O 156: }
  157:
O 158: func (r *result) Raw() ([]byte, error) {
X 159: 	if r.err != nil {
X 160: 		return nil, r.err
X 161: 	}
  162:
O 163: 	return ConvertBytes(r.result.Data, "")
  164: }
  165:
O 166: func (r *result) Bind(a interface{}) error {
X 167: 	if r.err != nil {
X 168: 		return r.err
X 169: 	}
  170:
O 171: 	return UnmarshalBinaryTo(r.result.Data, a, "")
  172: }
  173:
X 174: func (r *result) String() string {
X 175: 	return string(r.result.GetData())
X 176: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/helpers.go
   1: package spark_v1
   2:
   3: import (
   4: 	"errors"
   5: )
   6:
O  7: var CompleteSuccess = func(ctx CompleteContext) StageError {
O  8: 	return nil
O  9: }
  10:
X 11: var CompleteError = func(ctx CompleteContext) StageError {
X 12: 	return NewStageError(errors.New("complete failed"))
X 13: }
  14:
// appendIfNotNil appends only the non-nil pointers from items to array and
// returns the (possibly grown) slice.
func appendIfNotNil[T any](array []*T, items ...*T) []*T {
	for _, candidate := range items {
		if candidate == nil {
			continue
		}
		array = append(array, candidate)
	}
	return array
}
  23:
O 24: func addBreadcrumb(nodes ...*node) {
O 25: 	var nextNodes []*node
O 26: 	for _, n := range nodes {
O 27: 		n.cancel.appendBreadcrumb(cancelNodeType, n.breadcrumb)
O 28: 		n.compensate.appendBreadcrumb(compensateNodeType, n.breadcrumb)
O 29: 		nextNodes = appendIfNotNil(nextNodes, n.compensate, n.cancel)
O 30: 	}
O 31: 	if len(nextNodes) > 0 {
O 32: 		addBreadcrumb(nextNodes...)
O 33: 	}
  34: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type variableHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcIOHandler(client sparkv1.ManagerServiceClient) IOHandler {
X 13: 	return variableHandler{client}
X 14: }
  15:
X 16: func (g variableHandler) Inputs(jobKey string, names ...string) Inputs {
X 17: 	variables, err := g.client.GetInputs(context.Background(), newGetVariablesRequest(jobKey, names...))
X 18: 	if err != nil {
X 19: 		return newInputs(err, nil)
X 20: 	}
  21:
X 22: 	return newInputs(err, variables.Variables)
  23: }
  24:
X 25: func (g variableHandler) Input(jobKey, name string) Input {
X 26: 	return g.Inputs(jobKey, name).Get(name)
X 27: }
  28:
X 29: func (g variableHandler) Output(jobKey string, variables ...*Var) error {
X 30: 	request, err := newSetVariablesRequest(jobKey, variables...)
X 31: 	if err != nil {
X 32: 		return err
X 33: 	}
X 34: 	_, err = g.client.SetOutputs(context.Background(), request)
X 35: 	return err
  36: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_memory.go
   1: package spark_v1
   2:
   3: import (
   4: 	"fmt"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"testing"
   7: )
   8:
   9: type inMemoryIOHandler struct {
  10: 	variables map[string]*Var
  11: 	t         *testing.T
  12: }
  13:
O 14: func NewInMemoryIOHandler(t *testing.T) TestIOHandler {
O 15: 	i := &inMemoryIOHandler{t: t, variables: map[string]*Var{}}
O 16: 	return i
O 17: }
  18:
O 19: func (i *inMemoryIOHandler) Inputs(jobKey string, names ...string) Inputs {
O 20: 	var (
O 21: 		vars = map[string]*sparkv1.Variable{}
O 22: 		err  error
O 23: 	)
O 24: 	for _, n := range names {
O 25: 		key := i.key(jobKey, n)
O 26: 		if v, ok := i.variables[key]; ok {
O 27: 			var va *sparkv1.Variable
O 28: 			va, err = newVariable(v.Name, v.MimeType, v.Value)
O 29: 			vars[v.Name] = va
O 30: 		}
  31: 	}
X 32: 	if len(vars) == 0 {
X 33: 		i.t.Fatal("no variables found for the params: ")
X 34: 	}
O 35: 	return newInputs(err, vars)
  36: }
  37:
O 38: func (i *inMemoryIOHandler) Input(jobKey, name string) Input {
O 39: 	inputs := i.Inputs(jobKey, name)
O 40: 	return inputs.Get(name)
O 41: }
  42:
X 43: func (i *inMemoryIOHandler) Output(jobKey string, variables ...*Var) error {
X 44: 	for _, v := range variables {
X 45: 		i.variables[i.key(jobKey, v.Name)] = v
X 46: 	}
X 47: 	return nil
  48: }
  49:
O 50: func (i *inMemoryIOHandler) SetVar(jobKey string, v *Var) {
O 51: 	i.variables[i.key(jobKey, v.Name)] = v
O 52: }
  53:
O 54: func (i *inMemoryIOHandler) key(jobKey, name string) string {
O 55: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 56: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type stageProgressHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcStageProgressHandler(client sparkv1.ManagerServiceClient) StageProgressHandler {
X 13: 	return &stageProgressHandler{client: client}
X 14: }
  15:
X 16: func (g *stageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
X 17: 	resp, err := g.client.GetStageStatus(context.Background(), newGetStageStatusReq(jobKey, name))
X 18: 	return &resp.Status, err
X 19: }
  20:
X 21: func (g *stageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X 22: 	_, err := g.client.SetStageStatus(context.Background(), stageStatus)
X 23: 	return err
X 24: }
  25:
X 26: func (g *stageProgressHandler) GetResult(jobKey, name string) Bindable {
X 27: 	result, err := g.client.GetStageResult(context.Background(), newStageResultReq(jobKey, name))
X 28: 	if err != nil {
X 29: 		return newResult(err, nil)
X 30: 	}
X 31: 	return newResult(nil, result)
  32: }
  33:
X 34: func (g *stageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X 35: 	_, err := g.client.SetStageResult(context.Background(), result)
X 36: 	return err
X 37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_memory.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: 	"testing"
    7:
    8: 	"github.com/stretchr/testify/assert"
    9: )
   10:
   11: type InMemoryStageProgressHandler struct {
   12: 	t                  *testing.T
   13: 	stages             map[string]*sparkv1.SetStageStatusRequest
   14: 	results            map[string]*sparkv1.SetStageResultRequest
   15: 	behaviourSet       map[string]StageBehaviourParams
   16: 	behaviourSetResult map[string]ResultBehaviourParams
   17: }
   18:
O  19: func NewInMemoryStageProgressHandler(t *testing.T, seeds ...any) TestStageProgressHandler {
O  20: 	handler := InMemoryStageProgressHandler{t,
O  21: 		map[string]*sparkv1.SetStageStatusRequest{}, map[string]*sparkv1.SetStageResultRequest{},
O  22: 		map[string]StageBehaviourParams{}, map[string]ResultBehaviourParams{}}
O  23:
O  24: 	return &handler
O  25: }
   26:
O  27: func (i *InMemoryStageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
O  28: 	if stage, ok := i.stages[i.key(jobKey, name)]; ok {
O  29: 		return &stage.Status, nil
O  30: 	}
X  31: 	i.t.Fatalf("stage status no found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  32: 	return nil, nil
   33: }
   34:
O  35: func (i *InMemoryStageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X  36: 	if bp, ok := i.behaviourSet[stageStatus.Name]; ok {
X  37: 		if bp.status == stageStatus.Status && bp.err != nil {
X  38: 			return bp.err
X  39: 		}
   40: 	}
O  41: 	i.stages[i.key(stageStatus.Key, stageStatus.Name)] = stageStatus
O  42: 	return nil
   43: }
   44:
O  45: func (i *InMemoryStageProgressHandler) GetResult(jobKey, name string) Bindable {
O  46: 	if variable, ok := i.results[i.key(jobKey, name)]; ok {
O  47: 		return newResult(nil, &sparkv1.GetStageResultResponse{
O  48: 			Data: variable.Data,
O  49: 		})
O  50: 	}
X  51: 	i.t.Fatalf("stage result not found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  52: 	return nil
   53: }
   54:
O  55: func (i *InMemoryStageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X  56: 	if br, ok := i.behaviourSetResult[result.Name]; ok {
X  57: 		if br.jobKey == result.GetKey() && br.name == result.Name && br.err != nil {
X  58: 			return br.err
X  59: 		}
   60: 	}
O  61: 	i.results[i.key(result.GetKey(), result.Name)] = result
O  62: 	return nil
   63: }
   64:
X  65: func (i *InMemoryStageProgressHandler) AddBehaviour() *Behaviour {
X  66: 	return &Behaviour{i: i}
X  67: }
   68:
X  69: func (i *InMemoryStageProgressHandler) ResetBehaviour() {
X  70: 	i.behaviourSet = map[string]StageBehaviourParams{}
X  71: }
   72:
O  73: func (i *InMemoryStageProgressHandler) AssertStageCompleted(jobKey, stageName string) {
O  74: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_COMPLETED)
O  75: }
   76:
X  77: func (i *InMemoryStageProgressHandler) AssertStageStarted(jobKey, stageName string) {
X  78: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_STARTED)
X  79: }
   80:
O  81: func (i *InMemoryStageProgressHandler) AssertStageSkipped(jobKey, stageName string) {
O  82: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_SKIPPED)
O  83: }
   84:
O  85: func (i *InMemoryStageProgressHandler) AssertStageCancelled(jobKey, stageName string) {
O  86: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_CANCELED)
O  87: }
   88:
O  89: func (i *InMemoryStageProgressHandler) AssertStageFailed(jobKey, stageName string) {
O  90: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_FAILED)
O  91: }
   92:
X  93: func (i *InMemoryStageProgressHandler) AssertStageResult(jobKey, stageName string, expectedStageResult any) {
X  94: 	r := i.GetResult(jobKey, stageName)
X  95: 	resB, err := r.Raw()
X  96: 	if err != nil {
X  97: 		i.t.Error(err)
X  98: 		return
X  99: 	}
X 100: 	req, err := newSetStageResultReq(jobKey, MimeTypeJSON, expectedStageResult)
X 101: 	if err != nil {
X 102: 		i.t.Error(err)
X 103: 		return
X 104: 	}
X 105: 	assert.Equal(i.t, req.Data, resB)
  106: }
  107:
O 108: func (i *InMemoryStageProgressHandler) key(jobKey, name string) string {
O 109: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 110: }
  111:
O 112: func (i *InMemoryStageProgressHandler) assertStageStatus(jobKey, stageName string, expectedStatus sparkv1.StageStatus) {
O 113: 	status, err := i.Get(jobKey, stageName)
X 114: 	if err != nil {
X 115: 		i.t.Error(err)
X 116: 		return
X 117: 	}
O 118: 	assert.Equal(i.t, &expectedStatus, status, "spark status expected: '%s' got: '%s'", expectedStatus, status)
  119: }
  120:
  121: type Behaviour struct {
  122: 	i *InMemoryStageProgressHandler
  123: }
  124:
X 125: func (b *Behaviour) Set(stageName string, status sparkv1.StageStatus, err error) *InMemoryStageProgressHandler {
X 126: 	b.i.behaviourSet[stageName] = StageBehaviourParams{err: err, status: status}
X 127: 	return b.i
X 128: }
  129:
X 130: func (b *Behaviour) SetResult(jobKey, stageName string, err error) *InMemoryStageProgressHandler {
X 131: 	b.i.behaviourSetResult[stageName] = ResultBehaviourParams{jobKey: jobKey, name: stageName, err: err}
X 132: 	return b.i
X 133: }
  134:
  135: type StageBehaviourParams struct {
  136: 	err    error
  137: 	status sparkv1.StageStatus
  138: }
  139:
  140: type ResultBehaviourParams struct {
  141: 	jobKey string
  142: 	name   string
  143: 	err    error
  144: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/server.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"net"
   7: 	"time"
   8:
   9: 	"google.golang.org/grpc"
  10: 	"google.golang.org/grpc/reflection"
  11: )
  12:
  13: /************************************************************************/
  14: // CONSTANTS
  15: /************************************************************************/
  16:
  17: var connectionTimeout = time.Second * 10
  18:
  19: /************************************************************************/
  20: // TYPES
  21: /************************************************************************/
  22:
  23: type server struct {
  24: 	config *config
  25: 	worker Worker
  26: 	svr    *grpc.Server
  27: }
  28:
  29: /************************************************************************/
  30: // SERVER
  31: /************************************************************************/
  32:
X 33: func newServer(cfg *config, worker Worker) *server {
X 34: 	return &server{config: cfg, worker: worker}
X 35: }
  36:
X 37: func (s *server) start() error {
X 38: 	// LOGGER SAMPLE >> add .Fields(fields) with the spark name on it
X 39: 	log := NewLogger()
X 40:
X 41: 	// nosemgrep
X 42: 	s.svr = grpc.NewServer(grpc.ConnectionTimeout(connectionTimeout))
X 43: 	sparkv1.RegisterAgentServiceServer(s.svr, s)
X 44:
X 45: 	reflection.Register(s.svr)
X 46:
X 47: 	listener, err := net.Listen("tcp", s.config.serverAddress())
X 48: 	if err != nil {
X 49: 		log.Error(err, "error setting up the listener")
X 50: 		return err
X 51: 	}
  52:
  53: 	// nosemgrep
X 54: 	if err = s.svr.Serve(listener); err != nil {
X 55: 		log.Error(err, "error starting the server")
X 56: 		return err
X 57: 	}
X 58: 	return nil
  59: }
  60:
X 61: func (s *server) stop() {
X 62: 	if s.svr != nil {
X 63: 		s.svr.GracefulStop()
X 64: 	}
  65: }
  66:
  67: /************************************************************************/
  68: // RPC IMPLEMENTATIONS
  69: /************************************************************************/
  70:
X 71: func (s *server) ExecuteJob(ctx context.Context, request *sparkv1.ExecuteJobRequest) (*sparkv1.Void, error) {
X 72: 	jobContext := NewSparkMetadata(ctx, request.Key, request.CorrelationId, request.TransactionId, nil)
X 73: 	go func() { // TODO goroutine pool
X 74: 		_ = s.worker.Execute(jobContext)
X 75: 	}()
X 76: 	return &sparkv1.Void{}, nil
  77: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/variable.go
   1: package spark_v1
   2:
   3: type Var struct {
   4: 	Name     string
   5: 	MimeType string
   6: 	Value    interface{}
   7: }
   8:
O  9: func NewVar(name, mimeType string, value interface{}) *Var {
O 10: 	return &Var{name, mimeType, value}
O 11: }

@@ -1,108 +1,72 @@
package sparkv1

import (
"encoding/json"
"google.golang.org/protobuf/types/known/structpb"
jsoniter "github.com/json-iterator/go"
)

const (
NoMimeType = ""
MimeTypeJSON = "application/json"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should these not be in a global constants package, as I'm sure they will be used everywhere?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would rather not create multiple packages for the developer to deal with, and the developer can put in any MIME type they want — it's a plain string, so we don't really need to provide a constant; they can define any MIME type. The reason those are there is because of that extension, but I will update this PR to delete that extension; I don't think we even need it anymore because the data coming in is now bytes.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

but surely there is a library or something that defines all the common MIME types? I just hate seeing the same constant defined everywhere; it's annoying for developers to always remember the different quirky ways someone typed the const name.


func UnmarshalBinaryTo(data []byte, out interface{}, mimeType string) error {
if mimeType == "" {
return sparkv1.SerdesMap[MimeTypeJSON].Unmarshal(data, &out)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

where is this defined? MimeTypeJSON

@@ -3,9 +3,9 @@ package spark_v1
type Var struct {
Name string
MimeType string
Value any
Value interface{}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why use interface{} over any?

@waeljammal
Copy link
Collaborator Author

go-cover-view

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/errors.go
    1: package spark_v1
    2:
    3: import (
    4: 	"encoding/json"
    5: 	"errors"
    6: 	"fmt"
    7: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    8: 	"google.golang.org/protobuf/types/known/structpb"
    9: 	"time"
   10: )
   11:
   12: /************************************************************************/
   13: // TYPES
   14: /************************************************************************/
   15:
   16: type ErrorOption = func(err *stageError) *stageError
   17:
   18: type stageError struct {
   19: 	err       error
   20: 	errorType sparkv1.ErrorType
   21: 	errorCode uint32
   22: 	metadata  map[string]any
   23: 	retry     *RetryConfig
   24: }
   25:
   26: type RetryConfig struct {
   27: 	times         uint
   28: 	backoffMillis uint
   29: }
   30:
   31: /************************************************************************/
   32: // ERRORS
   33: /************************************************************************/
   34:
   35: var (
   36: 	ErrStageDoesNotExist        = errors.New("stage does not exists")
   37: 	ErrBindValueFailed          = errors.New("bind value failed")
   38: 	ErrVariableNotFound         = errors.New("variable not found")
   39: 	ErrStageNotFoundInNodeChain = errors.New("stage not found in the node chain")
   40: 	ErrConditionalStageSkipped  = errors.New("conditional stage execution")
   41: 	ErrChainIsNotValid          = errors.New("chain is not valid")
   42: 	ErrInputVariableNotFound    = errors.New("input variable not found")
   43:
   44: 	errorTypeToStageStatusMapper = map[sparkv1.ErrorType]sparkv1.StageStatus{
   45: 		sparkv1.ErrorType_ERROR_TYPE_RETRY:              sparkv1.StageStatus_STAGE_FAILED,
   46: 		sparkv1.ErrorType_ERROR_TYPE_SKIP:               sparkv1.StageStatus_STAGE_SKIPPED,
   47: 		sparkv1.ErrorType_ERROR_TYPE_CANCELLED:          sparkv1.StageStatus_STAGE_CANCELED,
   48: 		sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED: sparkv1.StageStatus_STAGE_FAILED,
   49: 	}
   50: )
   51:
   52: /************************************************************************/
   53: // ERROR FACTORIES
   54: /************************************************************************/
   55:
X  56: func newErrStageNotFoundInNodeChain(stage string) error {
X  57: 	return fmt.Errorf("%w: %s", ErrStageNotFoundInNodeChain, stage)
X  58: }
   59:
X  60: func newErrConditionalStageSkipped(stageName string) error {
X  61: 	return fmt.Errorf("%w: stage '%s' skipped", ErrConditionalStageSkipped, stageName)
X  62: }
   63:
O  64: func NewStageError(err error, opts ...ErrorOption) StageError {
O  65: 	stg := &stageError{err: err}
O  66: 	for _, opt := range opts {
O  67: 		stg = opt(stg)
O  68: 	}
O  69: 	return stg
   70: }
   71:
   72: /************************************************************************/
   73: // STAGE ERROR ENVELOPE
   74: /************************************************************************/
   75:
O  76: func (s *stageError) ErrorType() sparkv1.ErrorType {
O  77: 	return s.errorType
O  78: }
   79:
O  80: func (s *stageError) Code() uint32 {
O  81: 	return s.errorCode
O  82: }
   83:
O  84: func (s *stageError) Error() string {
O  85: 	return s.err.Error()
O  86: }
   87:
O  88: func (s *stageError) Metadata() map[string]any {
O  89: 	return s.metadata
O  90: }
   91:
O  92: func (s *stageError) ToErrorMessage() *sparkv1.Error {
O  93: 	err := &sparkv1.Error{
O  94: 		Error:     s.err.Error(),
O  95: 		ErrorCode: s.errorCode,
O  96: 		ErrorType: s.errorType,
O  97: 	}
O  98: 	if s.metadata != nil {
O  99: 		err.Metadata, _ = structpb.NewValue(s.metadata)
O 100: 	}
X 101: 	if s.retry != nil {
X 102: 		err.Retry = &sparkv1.RetryStrategy{Backoff: uint32(s.retry.backoffMillis), Count: uint32(s.retry.times)}
X 103: 	}
O 104: 	return err
  105: }
  106:
  107: /************************************************************************/
  108: // STAGE ERROR OPTIONS
  109: /************************************************************************/
  110:
X 111: func WithErrorCode(code uint32) ErrorOption {
X 112: 	return func(err *stageError) *stageError {
X 113: 		err.errorCode = code
X 114: 		return err
X 115: 	}
  116: }
  117:
X 118: func WithMetadata(metadata any) ErrorOption {
X 119: 	return func(err *stageError) *stageError {
X 120: 		err.parseMetadata(metadata)
X 121: 		return err
X 122: 	}
  123: }
  124:
X 125: func WithRetry(times uint, backoffMillis time.Duration) ErrorOption {
X 126: 	return func(err *stageError) *stageError {
X 127: 		err.retry = &RetryConfig{times, uint(backoffMillis.Milliseconds())}
X 128: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_RETRY
X 129: 		return err
X 130: 	}
  131: }
  132:
O 133: func WithSkip() ErrorOption {
O 134: 	return func(err *stageError) *stageError {
O 135: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_SKIP
O 136: 		return err
O 137: 	}
  138: }
  139:
O 140: func WithCancel() ErrorOption {
O 141: 	return func(err *stageError) *stageError {
O 142: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_CANCELLED
O 143: 		err.metadata = map[string]any{"reason": "canceled in stage"}
O 144: 		return err
O 145: 	}
  146: }
  147:
O 148: func WithFatal() ErrorOption {
O 149: 	return func(err *stageError) *stageError {
O 150: 		err.errorType = sparkv1.ErrorType_ERROR_TYPE_FATAL
O 151: 		return err
O 152: 	}
  153: }
  154:
O 155: func withErrorType(errorType sparkv1.ErrorType) ErrorOption {
O 156: 	return func(err *stageError) *stageError {
O 157: 		err.errorType = errorType
O 158: 		return err
O 159: 	}
  160: }
  161:
X 162: func (s *stageError) parseMetadata(metadata any) {
X 163: 	m := map[string]any{}
X 164: 	if metadata != nil {
X 165: 		mdBytes, _ := json.Marshal(metadata)
X 166: 		_ = json.Unmarshal(mdBytes, &m)
X 167: 	}
X 168: 	s.metadata = m
  169: }
  170:
  171: /************************************************************************/
  172: // HELPERS
  173: /************************************************************************/
  174:
O 175: func errorTypeToStageStatus(errType sparkv1.ErrorType) sparkv1.StageStatus {
O 176: 	if err, ok := errorTypeToStageStatusMapper[errType]; ok {
O 177: 		return err
O 178: 	}
O 179: 	return sparkv1.StageStatus_STAGE_FAILED
  180: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/executor.go
    1: package spark_v1
    2:
    3: import sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    4:
    5: const (
    6: 	stageLogField  = "stage"
    7: 	jobKeyLogField = "job_key"
    8: )
    9:
O  10: func (c *chain) execute(ctx SparkContext) StageError {
O  11: 	n, err := c.getNodeToResume(ctx.LastActiveStage())
X  12: 	if err != nil {
X  13: 		return NewStageError(err)
X  14: 	}
O  15: 	return c.runner(ctx, n)
   16: }
   17:
// runner executes the stages of node in order. For each stage it:
//  1. returns nil early if the job context has been cancelled,
//  2. applies conditional-execution options, recording the stage's error
//     status and moving on when they reject it,
//  3. marks the stage started, runs the stage callback (directly or through
//     the configured stage delegate), and stores its result,
//  4. marks the stage completed, routing errors through handleStageError
//     (skip-type errors continue with the next stage).
// After all stages it runs the node's `complete` callback, if present, and
// records the completion status.
//nolint:cyclop
func (c *chain) runner(ctx SparkContext, node *node) StageError {
	stages := getStagesToResume(node, ctx.LastActiveStage())
	for _, stg := range stages {
		select {
		case <-ctx.Ctx().Done():
			// Job cancelled: stop quietly; remaining stages are not run.
			return nil
		default:
			ctx.Log().AddFields(stageLogField, stg.name).AddFields(jobKeyLogField, ctx.JobKey())

			// Conditional execution: a returned error means "do not run this
			// stage"; the stage is recorded with that error and skipped.
			if err := stg.ApplyConditionalExecutionOptions(ctx, stg.name); err != nil {
				er := updateStage(ctx, stg.name, withStageError(err))
				if er != nil {
					ctx.Log().Error(er, "error updating stage status to 'started'")
					return NewStageError(er)
				}
				continue
			}

			er := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED))

			if er != nil {
				ctx.Log().Error(er, "error updating stage status to 'started'")
				return NewStageError(er)
			}

			var result any
			var stageErr StageError

			// stage execution is delegated in which case call the delegate
			// instead and expect that it will invoke the stage and return a result, error
			if ctx.delegateStage() != nil {
				result, stageErr = ctx.delegateStage()(NewStageContext(ctx, stg.name), stg.cb)
			} else {
				result, stageErr = stg.cb(NewStageContext(ctx, stg.name))
			}

			if err := c.handleStageError(ctx, node, stg, stageErr); err != nil {
				// Skip errors continue with the next stage; anything else aborts.
				if err.ErrorType() == sparkv1.ErrorType_ERROR_TYPE_SKIP {
					continue
				}
				return err
			}

			if err := storeStageResult(ctx, stg, result); err != nil {
				return err
			}

			if err := updateStage(ctx, stg.name, withStageStatus(sparkv1.StageStatus_STAGE_COMPLETED)); err != nil {
				ctx.Log().Error(err, "error setting the stage status to 'completed'")
				return NewStageError(err)
			}
		}
	}

	// All stages done: run the node's completion callback, if one is defined.
	select {
	case <-ctx.Ctx().Done():
		return nil
	default:
		if node.complete != nil {
			if er := updateStage(ctx, node.complete.name, withStageStatus(sparkv1.StageStatus_STAGE_STARTED)); er != nil {
				ctx.Log().Error(er, "error setting the completed stage status to 'started'")
				return NewStageError(er)
			}

			var stageErr StageError

			if ctx.delegateComplete() != nil {
				stageErr = ctx.delegateComplete()(NewCompleteContext(ctx, node.complete.name), node.complete.cb)
			} else {
				stageErr = node.complete.cb(NewCompleteContext(ctx, node.complete.name))
			}

			if e := updateStage(ctx, node.complete.name, withStageStatusOrError(sparkv1.StageStatus_STAGE_COMPLETED, stageErr)); e != nil {
				ctx.Log().Error(e, "error setting the completed stage status to 'completed'")
				return NewStageError(e)
			}
			return stageErr
		}
	}

	return nil
}
  101:
// handleStageError routes a stage error to the appropriate recovery path:
// the compensation chain for unspecified failures, the cancellation chain
// for cancels, pass-through for retry/skip errors, and a fatal wrap for
// anything unrecognized. A nil err is a no-op. The stage's status is always
// updated first; failure to record it aborts with that error instead.
//nolint:cyclop
func (c *chain) handleStageError(ctx SparkContext, node *node, stg *stage, err StageError) StageError {
	if err == nil {
		return nil
	}

	if e := updateStage(ctx, stg.name, withStageError(err)); e != nil {
		ctx.Log().Error(err, "error updating stage status")
		return NewStageError(e)
	}

	switch err.ErrorType() {
	case sparkv1.ErrorType_ERROR_TYPE_FAILED_UNSPECIFIED:
		// Run the compensation chain (if any) from a fresh resume position,
		// then surface the original error to the caller.
		if node.compensate != nil {
			e := c.runner(ctx.WithoutLastActiveStage(), node.compensate)
			if e != nil {
				return e
			}
		}
		return err
	case sparkv1.ErrorType_ERROR_TYPE_CANCELLED:
		// Run the cancellation chain (if any), then surface the original error.
		if node.cancel != nil {
			e := c.runner(ctx.WithoutLastActiveStage(), node.cancel)
			if e != nil {
				return e
			}
		}
		return err
	case sparkv1.ErrorType_ERROR_TYPE_RETRY:
		return err
	case sparkv1.ErrorType_ERROR_TYPE_SKIP:
		return err
	case sparkv1.ErrorType_ERROR_TYPE_FATAL:
		fallthrough
	default:
		ctx.Log().Error(err, "unsupported error type returned from stage '%s'", stg.name)
		return NewStageError(err, withErrorType(sparkv1.ErrorType_ERROR_TYPE_FATAL))
	}
}
  141:
// storeStageResult persists a non-nil stage result via the stage progress
// handler. On a marshaling or submission failure the stage is additionally
// marked failed; if that status update itself fails, its error is returned
// instead of the original one. A nil result is a no-op.
func storeStageResult(ctx SparkContext, stg *stage, result any) StageError {
	if result != nil { //nolint:nestif
		req, err := newSetStageResultReq(ctx.JobKey(), stg.name, result)
		if err != nil {
			ctx.Log().Error(err, "error creating set stage status request")
			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
				ctx.Log().Error(err, "error updating stage status")
				return NewStageError(e)
			}
			return NewStageError(err)
		}
		if err := ctx.StageProgressHandler().SetResult(req); err != nil {
			ctx.Log().Error(err, "error on set stage status request")
			if e := updateStage(ctx, stg.name, withError(err)); e != nil {
				ctx.Log().Error(err, "error updating stage status")
				return NewStageError(e)
			}
			return NewStageError(err)
		}
	}
	return nil
}
  164:
  165: type updateStageOption = func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest
  166:
O 167: func withStageStatusOrError(status sparkv1.StageStatus, err StageError) updateStageOption {
O 168: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 169: 		if err != nil {
X 170: 			return withStageError(err)(stage)
X 171: 		}
O 172: 		return withStageStatus(status)(stage)
  173: 	}
  174: }
  175:
O 176: func withStageStatus(status sparkv1.StageStatus) updateStageOption {
O 177: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
O 178: 		stage.Status = status
O 179: 		return stage
O 180: 	}
  181: }
  182:
O 183: func withStageError(err StageError) updateStageOption {
O 184: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 185: 		if err == nil {
X 186: 			return stage
X 187: 		}
O 188: 		stage.Status = errorTypeToStageStatus(err.ErrorType())
O 189: 		stage.Err = err.ToErrorMessage()
O 190: 		return stage
  191: 	}
  192: }
  193:
X 194: func withError(err error) updateStageOption {
X 195: 	return func(stage *sparkv1.SetStageStatusRequest) *sparkv1.SetStageStatusRequest {
X 196: 		if err == nil {
X 197: 			return stage
X 198: 		}
X 199: 		stage.Status = sparkv1.StageStatus_STAGE_FAILED
X 200: 		stage.Err = NewStageError(err).ToErrorMessage()
X 201: 		return stage
  202: 	}
  203: }
  204:
O 205: func updateStage(ctx SparkContext, name string, opts ...updateStageOption) error {
O 206: 	req := &sparkv1.SetStageStatusRequest{Key: ctx.JobKey(), Name: name}
O 207: 	for _, opt := range opts {
O 208: 		req = opt(req)
O 209: 	}
O 210: 	return ctx.StageProgressHandler().Set(req)
  211: }
  212:
O 213: func getStagesToResume(n *node, lastActiveStage *sparkv1.LastActiveStage) []*stage {
O 214: 	if lastActiveStage == nil {
O 215: 		return n.stages
O 216: 	}
X 217: 	var stages []*stage
X 218: 	for idx, stg := range n.stages {
X 219: 		if stg.name == lastActiveStage.Name {
X 220: 			stages = append(stages, n.stages[idx:]...)
X 221: 		}
  222: 	}
X 223: 	return stages
  224: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/extensions.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    6: )
    7:
    8: /************************************************************************/
    9: // FACTORIES
   10: /************************************************************************/
   11:
O  12: func newSetStageResultReq(jobKey, name string, data interface{}) (*sparkv1.SetStageResultRequest, error) {
O  13: 	b, err := sparkv1.MarshalBinary(data)
O  14:
O  15: 	return &sparkv1.SetStageResultRequest{
O  16: 		Key:  jobKey,
O  17: 		Name: name,
O  18: 		Data: b,
O  19: 	}, err
O  20: }
   21:
O  22: func newVariable(name, mimeType string, value interface{}) (*sparkv1.Variable, error) {
O  23: 	pbValue, err := sparkv1.MarshalBinary(value)
X  24: 	if err != nil {
X  25: 		return nil, fmt.Errorf("error creating variable named '%s': %w", name, err)
X  26: 	}
O  27: 	return &sparkv1.Variable{
O  28: 		Data:     pbValue,
O  29: 		MimeType: mimeType,
O  30: 	}, nil
   31: }
   32:
X  33: func newStageResultReq(jobKey, stageName string) *sparkv1.GetStageResultRequest {
X  34: 	return &sparkv1.GetStageResultRequest{
X  35: 		Name: stageName,
X  36: 		Key:  jobKey,
X  37: 	}
X  38: }
   39:
X  40: func newSetStageStatusReq(jobKey, stageName string, status sparkv1.StageStatus, err ...*sparkv1.Error) *sparkv1.SetStageStatusRequest {
X  41: 	sssr := &sparkv1.SetStageStatusRequest{
X  42: 		Name:   stageName,
X  43: 		Key:    jobKey,
X  44: 		Status: status,
X  45: 	}
X  46: 	if len(err) > 0 {
X  47: 		sssr.Err = err[0]
X  48: 	}
X  49: 	return sssr
   50: }
   51:
X  52: func newGetVariablesRequest(jobKey string, names ...string) *sparkv1.GetInputsRequest {
X  53: 	vr := &sparkv1.GetInputsRequest{
X  54: 		Key: jobKey,
X  55: 	}
X  56: 	vr.Names = append(vr.Names, names...)
X  57: 	return vr
X  58: }
   59:
X  60: func newSetVariablesRequest(jobKey string, variables ...*Var) (*sparkv1.SetOutputsRequest, error) {
X  61: 	m := map[string]*sparkv1.Variable{}
X  62: 	for _, v := range variables {
X  63: 		variable, err := newVariable(v.Name, v.MimeType, v.Value)
X  64: 		if err != nil {
X  65: 			return nil, err
X  66: 		}
X  67: 		m[v.Name] = variable
   68: 	}
X  69: 	return &sparkv1.SetOutputsRequest{Key: jobKey, Variables: m}, nil
   70: }
   71:
X  72: func newGetStageStatusReq(jobKey, stageName string) *sparkv1.GetStageStatusRequest {
X  73: 	return &sparkv1.GetStageStatusRequest{Key: jobKey, Name: stageName}
X  74: }
   75:
   76: /************************************************************************/
   77: // INPUT
   78: /************************************************************************/
   79:
   80: type input struct {
   81: 	variable *sparkv1.Variable
   82: 	err      error
   83: }
   84:
O  85: func newInput(variable *sparkv1.Variable, err error) *input {
O  86: 	return &input{variable: variable, err: err}
O  87: }
   88:
X  89: func (i *input) String() string {
X  90: 	return string(i.variable.Data)
X  91: }
   92:
O  93: func (i *input) Raw() ([]byte, error) {
X  94: 	if i.err != nil {
X  95: 		return nil, i.err
X  96: 	}
   97:
O  98: 	return sparkv1.ConvertBytes(i.variable.Data, i.variable.MimeType)
   99: }
  100:
O 101: func (i *input) Bind(a interface{}) error {
X 102: 	if i.err != nil {
X 103: 		return i.err
X 104: 	}
  105:
X 106: 	if err := sparkv1.UnmarshalBinaryTo(i.variable.Data, a, ""); err != nil {
X 107: 		return err
X 108: 	}
  109:
O 110: 	return nil
  111: }
  112:
  113: /************************************************************************/
  114: // BATCH INPUTS
  115: /************************************************************************/
  116:
  117: type inputs struct {
  118: 	vars map[string]*sparkv1.Variable
  119: 	err  error
  120: }
  121:
O 122: func newInputs(err error, vars map[string]*sparkv1.Variable) Inputs {
O 123: 	return &inputs{vars: vars, err: err}
O 124: }
  125:
O 126: func (v inputs) Get(name string) Bindable {
O 127: 	found, ok := v.vars[name]
O 128: 	if ok {
O 129: 		return newInput(found, v.err)
O 130: 	}
X 131: 	err := v.err
X 132: 	if err == nil {
X 133: 		err = ErrInputVariableNotFound
X 134: 	}
X 135: 	return newInput(nil, v.err)
  136: }
  137:
X 138: func (v inputs) Error() error {
X 139: 	return v.err
X 140: }
  141:
  142: /************************************************************************/
  143: // STAGE RESULT
  144: /************************************************************************/
  145:
  146: type result struct {
  147: 	result *sparkv1.GetStageResultResponse
  148: 	err    error
  149: }
  150:
O 151: func newResult(err error, r *sparkv1.GetStageResultResponse) Bindable {
O 152: 	return &result{
O 153: 		result: r,
O 154: 		err:    err,
O 155: 	}
O 156: }
  157:
O 158: func (r *result) Raw() ([]byte, error) {
X 159: 	if r.err != nil {
X 160: 		return nil, r.err
X 161: 	}
  162:
O 163: 	return sparkv1.ConvertBytes(r.result.Data, "")
  164: }
  165:
O 166: func (r *result) Bind(a interface{}) error {
X 167: 	if r.err != nil {
X 168: 		return r.err
X 169: 	}
  170:
O 171: 	return sparkv1.UnmarshalBinaryTo(r.result.Data, a, "")
  172: }
  173:
X 174: func (r *result) String() string {
X 175: 	return string(r.result.GetData())
X 176: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/helpers.go
   1: package spark_v1
   2:
   3: import (
   4: 	"errors"
   5: )
   6:
O  7: var CompleteSuccess = func(ctx CompleteContext) StageError {
O  8: 	return nil
O  9: }
  10:
X 11: var CompleteError = func(ctx CompleteContext) StageError {
X 12: 	return NewStageError(errors.New("complete failed"))
X 13: }
  14:
// appendIfNotNil appends only the non-nil pointers from items to array and
// returns the (possibly grown) slice.
func appendIfNotNil[T any](array []*T, items ...*T) []*T {
	for _, candidate := range items {
		if candidate == nil {
			continue
		}
		array = append(array, candidate)
	}
	return array
}
  23:
// addBreadcrumb walks the node tree level by level, stamping each node's
// cancel and compensate children with a breadcrumb recording the path from
// their parent, then recurses into those children.
func addBreadcrumb(nodes ...*node) {
	var nextNodes []*node
	for _, n := range nodes {
		// n.cancel / n.compensate may be nil here; appendBreadcrumb is
		// presumably nil-receiver-safe — TODO confirm in node's definition.
		n.cancel.appendBreadcrumb(cancelNodeType, n.breadcrumb)
		n.compensate.appendBreadcrumb(compensateNodeType, n.breadcrumb)
		// Only non-nil children are queued for the next level.
		nextNodes = appendIfNotNil(nextNodes, n.compensate, n.cancel)
	}
	if len(nextNodes) > 0 {
		addBreadcrumb(nextNodes...)
	}
}

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type variableHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcIOHandler(client sparkv1.ManagerServiceClient) IOHandler {
X 13: 	return variableHandler{client}
X 14: }
  15:
X 16: func (g variableHandler) Inputs(jobKey string, names ...string) Inputs {
X 17: 	variables, err := g.client.GetInputs(context.Background(), newGetVariablesRequest(jobKey, names...))
X 18: 	if err != nil {
X 19: 		return newInputs(err, nil)
X 20: 	}
  21:
X 22: 	return newInputs(err, variables.Variables)
  23: }
  24:
X 25: func (g variableHandler) Input(jobKey, name string) Input {
X 26: 	return g.Inputs(jobKey, name).Get(name)
X 27: }
  28:
X 29: func (g variableHandler) Output(jobKey string, variables ...*Var) error {
X 30: 	request, err := newSetVariablesRequest(jobKey, variables...)
X 31: 	if err != nil {
X 32: 		return err
X 33: 	}
X 34: 	_, err = g.client.SetOutputs(context.Background(), request)
X 35: 	return err
  36: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/io_memory.go
   1: package spark_v1
   2:
   3: import (
   4: 	"fmt"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"testing"
   7: )
   8:
   9: type inMemoryIOHandler struct {
  10: 	variables map[string]*Var
  11: 	t         *testing.T
  12: }
  13:
O 14: func NewInMemoryIOHandler(t *testing.T) TestIOHandler {
O 15: 	i := &inMemoryIOHandler{t: t, variables: map[string]*Var{}}
O 16: 	return i
O 17: }
  18:
O 19: func (i *inMemoryIOHandler) Inputs(jobKey string, names ...string) Inputs {
O 20: 	var (
O 21: 		vars = map[string]*sparkv1.Variable{}
O 22: 		err  error
O 23: 	)
O 24: 	for _, n := range names {
O 25: 		key := i.key(jobKey, n)
O 26: 		if v, ok := i.variables[key]; ok {
O 27: 			var va *sparkv1.Variable
O 28: 			va, err = newVariable(v.Name, v.MimeType, v.Value)
O 29: 			vars[v.Name] = va
O 30: 		}
  31: 	}
X 32: 	if len(vars) == 0 {
X 33: 		i.t.Fatal("no variables found for the params: ")
X 34: 	}
O 35: 	return newInputs(err, vars)
  36: }
  37:
O 38: func (i *inMemoryIOHandler) Input(jobKey, name string) Input {
O 39: 	inputs := i.Inputs(jobKey, name)
O 40: 	return inputs.Get(name)
O 41: }
  42:
X 43: func (i *inMemoryIOHandler) Output(jobKey string, variables ...*Var) error {
X 44: 	for _, v := range variables {
X 45: 		i.variables[i.key(jobKey, v.Name)] = v
X 46: 	}
X 47: 	return nil
  48: }
  49:
O 50: func (i *inMemoryIOHandler) SetVar(jobKey string, v *Var) {
O 51: 	i.variables[i.key(jobKey, v.Name)] = v
O 52: }
  53:
O 54: func (i *inMemoryIOHandler) key(jobKey, name string) string {
O 55: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 56: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_grpc.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: )
   7:
   8: type stageProgressHandler struct {
   9: 	client sparkv1.ManagerServiceClient
  10: }
  11:
X 12: func newGrpcStageProgressHandler(client sparkv1.ManagerServiceClient) StageProgressHandler {
X 13: 	return &stageProgressHandler{client: client}
X 14: }
  15:
X 16: func (g *stageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
X 17: 	resp, err := g.client.GetStageStatus(context.Background(), newGetStageStatusReq(jobKey, name))
X 18: 	return &resp.Status, err
X 19: }
  20:
X 21: func (g *stageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X 22: 	_, err := g.client.SetStageStatus(context.Background(), stageStatus)
X 23: 	return err
X 24: }
  25:
X 26: func (g *stageProgressHandler) GetResult(jobKey, name string) Bindable {
X 27: 	result, err := g.client.GetStageResult(context.Background(), newStageResultReq(jobKey, name))
X 28: 	if err != nil {
X 29: 		return newResult(err, nil)
X 30: 	}
X 31: 	return newResult(nil, result)
  32: }
  33:
X 34: func (g *stageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X 35: 	_, err := g.client.SetStageResult(context.Background(), result)
X 36: 	return err
X 37: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/progress_memory.go
    1: package spark_v1
    2:
    3: import (
    4: 	"fmt"
    5: 	"github.com/azarc-io/vth-faas-sdk-go/internal/common"
    6: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
    7: 	"testing"
    8:
    9: 	"github.com/stretchr/testify/assert"
   10: )
   11:
   12: type InMemoryStageProgressHandler struct {
   13: 	t                  *testing.T
   14: 	stages             map[string]*sparkv1.SetStageStatusRequest
   15: 	results            map[string]*sparkv1.SetStageResultRequest
   16: 	behaviourSet       map[string]StageBehaviourParams
   17: 	behaviourSetResult map[string]ResultBehaviourParams
   18: }
   19:
// NewInMemoryStageProgressHandler returns a stage progress handler backed by
// in-memory maps, for use in tests.
// NOTE(review): the seeds parameter is currently ignored — confirm whether
// it was meant to pre-populate stage statuses/results.
func NewInMemoryStageProgressHandler(t *testing.T, seeds ...any) TestStageProgressHandler {
	handler := InMemoryStageProgressHandler{t,
		map[string]*sparkv1.SetStageStatusRequest{}, map[string]*sparkv1.SetStageResultRequest{},
		map[string]StageBehaviourParams{}, map[string]ResultBehaviourParams{}}

	return &handler
}
   27:
O  28: func (i *InMemoryStageProgressHandler) Get(jobKey, name string) (*sparkv1.StageStatus, error) {
O  29: 	if stage, ok := i.stages[i.key(jobKey, name)]; ok {
O  30: 		return &stage.Status, nil
O  31: 	}
X  32: 	i.t.Fatalf("stage status no found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  33: 	return nil, nil
   34: }
   35:
O  36: func (i *InMemoryStageProgressHandler) Set(stageStatus *sparkv1.SetStageStatusRequest) error {
X  37: 	if bp, ok := i.behaviourSet[stageStatus.Name]; ok {
X  38: 		if bp.status == stageStatus.Status && bp.err != nil {
X  39: 			return bp.err
X  40: 		}
   41: 	}
O  42: 	i.stages[i.key(stageStatus.Key, stageStatus.Name)] = stageStatus
O  43: 	return nil
   44: }
   45:
O  46: func (i *InMemoryStageProgressHandler) GetResult(jobKey, name string) Bindable {
O  47: 	if variable, ok := i.results[i.key(jobKey, name)]; ok {
O  48: 		return newResult(nil, &sparkv1.GetStageResultResponse{
O  49: 			Data: variable.Data,
O  50: 		})
O  51: 	}
X  52: 	i.t.Fatalf("stage result not found for params >> jobKey: %s, stageName: %s", jobKey, name)
X  53: 	return nil
   54: }
   55:
O  56: func (i *InMemoryStageProgressHandler) SetResult(result *sparkv1.SetStageResultRequest) error {
X  57: 	if br, ok := i.behaviourSetResult[result.Name]; ok {
X  58: 		if br.jobKey == result.GetKey() && br.name == result.Name && br.err != nil {
X  59: 			return br.err
X  60: 		}
   61: 	}
O  62: 	i.results[i.key(result.GetKey(), result.Name)] = result
O  63: 	return nil
   64: }
   65:
X  66: func (i *InMemoryStageProgressHandler) AddBehaviour() *Behaviour {
X  67: 	return &Behaviour{i: i}
X  68: }
   69:
X  70: func (i *InMemoryStageProgressHandler) ResetBehaviour() {
X  71: 	i.behaviourSet = map[string]StageBehaviourParams{}
X  72: }
   73:
O  74: func (i *InMemoryStageProgressHandler) AssertStageCompleted(jobKey, stageName string) {
O  75: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_COMPLETED)
O  76: }
   77:
X  78: func (i *InMemoryStageProgressHandler) AssertStageStarted(jobKey, stageName string) {
X  79: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_STARTED)
X  80: }
   81:
O  82: func (i *InMemoryStageProgressHandler) AssertStageSkipped(jobKey, stageName string) {
O  83: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_SKIPPED)
O  84: }
   85:
O  86: func (i *InMemoryStageProgressHandler) AssertStageCancelled(jobKey, stageName string) {
O  87: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_CANCELED)
O  88: }
   89:
O  90: func (i *InMemoryStageProgressHandler) AssertStageFailed(jobKey, stageName string) {
O  91: 	i.assertStageStatus(jobKey, stageName, sparkv1.StageStatus_STAGE_FAILED)
O  92: }
   93:
// AssertStageResult asserts that the stored raw result for the stage equals
// the marshaled form of expectedStageResult.
func (i *InMemoryStageProgressHandler) AssertStageResult(jobKey, stageName string, expectedStageResult any) {
	r := i.GetResult(jobKey, stageName)
	resB, err := r.Raw()
	if err != nil {
		i.t.Error(err)
		return
	}
	// NOTE(review): common.MimeTypeJSON is passed where newSetStageResultReq
	// expects the stage name. Only req.Data is used by the comparison below,
	// so the assertion still works, but stageName looks like the intended
	// argument — confirm and fix alongside the unused-import cleanup.
	req, err := newSetStageResultReq(jobKey, common.MimeTypeJSON, expectedStageResult)
	if err != nil {
		i.t.Error(err)
		return
	}
	assert.Equal(i.t, req.Data, resB)
}
  108:
O 109: func (i *InMemoryStageProgressHandler) key(jobKey, name string) string {
O 110: 	return fmt.Sprintf("%s_%s", jobKey, name)
O 111: }
  112:
O 113: func (i *InMemoryStageProgressHandler) assertStageStatus(jobKey, stageName string, expectedStatus sparkv1.StageStatus) {
O 114: 	status, err := i.Get(jobKey, stageName)
X 115: 	if err != nil {
X 116: 		i.t.Error(err)
X 117: 		return
X 118: 	}
O 119: 	assert.Equal(i.t, &expectedStatus, status, "spark status expected: '%s' got: '%s'", expectedStatus, status)
  120: }
  121:
  122: type Behaviour struct {
  123: 	i *InMemoryStageProgressHandler
  124: }
  125:
X 126: func (b *Behaviour) Set(stageName string, status sparkv1.StageStatus, err error) *InMemoryStageProgressHandler {
X 127: 	b.i.behaviourSet[stageName] = StageBehaviourParams{err: err, status: status}
X 128: 	return b.i
X 129: }
  130:
X 131: func (b *Behaviour) SetResult(jobKey, stageName string, err error) *InMemoryStageProgressHandler {
X 132: 	b.i.behaviourSetResult[stageName] = ResultBehaviourParams{jobKey: jobKey, name: stageName, err: err}
X 133: 	return b.i
X 134: }
  135:
  136: type StageBehaviourParams struct {
  137: 	err    error
  138: 	status sparkv1.StageStatus
  139: }
  140:
  141: type ResultBehaviourParams struct {
  142: 	jobKey string
  143: 	name   string
  144: 	err    error
  145: }

github.com/azarc-io/vth-faas-sdk-go/pkg/spark/v1/server.go
   1: package spark_v1
   2:
   3: import (
   4: 	"context"
   5: 	sparkv1 "github.com/azarc-io/vth-faas-sdk-go/internal/gen/azarc/sdk/spark/v1"
   6: 	"net"
   7: 	"time"
   8:
   9: 	"google.golang.org/grpc"
  10: 	"google.golang.org/grpc/reflection"
  11: )
  12:
  13: /************************************************************************/
  14: // CONSTANTS
  15: /************************************************************************/
  16:
  17: var connectionTimeout = time.Second * 10
  18:
  19: /************************************************************************/
  20: // TYPES
  21: /************************************************************************/
  22:
  23: type server struct {
  24: 	config *config
  25: 	worker Worker
  26: 	svr    *grpc.Server
  27: }
  28:
  29: /************************************************************************/
  30: // SERVER
  31: /************************************************************************/
  32:
X 33: func newServer(cfg *config, worker Worker) *server {
X 34: 	return &server{config: cfg, worker: worker}
X 35: }
  36:
// start configures and runs the gRPC server: it registers the agent service
// and server reflection, binds a TCP listener on the configured address, and
// then blocks serving requests until the server is stopped or fails.
func (s *server) start() error {
	// LOGGER SAMPLE >> add .Fields(fields) with the spark name on it
	log := NewLogger()

	// nosemgrep
	s.svr = grpc.NewServer(grpc.ConnectionTimeout(connectionTimeout))
	sparkv1.RegisterAgentServiceServer(s.svr, s)

	// Reflection lets tools such as grpcurl discover the service API.
	reflection.Register(s.svr)

	listener, err := net.Listen("tcp", s.config.serverAddress())
	if err != nil {
		log.Error(err, "error setting up the listener")
		return err
	}

	// nosemgrep
	if err = s.svr.Serve(listener); err != nil {
		log.Error(err, "error starting the server")
		return err
	}
	return nil
}
  60:
X 61: func (s *server) stop() {
X 62: 	if s.svr != nil {
X 63: 		s.svr.GracefulStop()
X 64: 	}
  65: }
  66:
  67: /************************************************************************/
  68: // RPC IMPLEMENTATIONS
  69: /************************************************************************/
  70:
// ExecuteJob accepts a job execution request and runs it asynchronously on a
// fresh goroutine, returning to the caller immediately. The worker's error is
// deliberately discarded here; progress is presumably reported through stage
// status updates instead — TODO confirm against the worker implementation.
func (s *server) ExecuteJob(ctx context.Context, request *sparkv1.ExecuteJobRequest) (*sparkv1.Void, error) {
	jobContext := NewSparkMetadata(ctx, request.Key, request.CorrelationId, request.TransactionId, nil)
	go func() { // TODO goroutine pool
		_ = s.worker.Execute(jobContext)
	}()
	return &sparkv1.Void{}, nil
}

@waeljammal waeljammal merged commit c0758a4 into main Dec 14, 2022
@waeljammal waeljammal deleted the feat/update-protos branch December 14, 2022 08:31
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.

None yet

2 participants