/
client.go
994 lines (862 loc) · 47.7 KB
/
client.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package internal
import (
"context"
"crypto/tls"
"fmt"
"sync/atomic"
"time"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/operatorservice/v1"
"go.temporal.io/api/workflowservice/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"go.temporal.io/sdk/converter"
"go.temporal.io/sdk/internal/common/metrics"
ilog "go.temporal.io/sdk/internal/log"
"go.temporal.io/sdk/log"
)
const (
	// DefaultNamespace is the namespace name which is used if not passed with options.
	DefaultNamespace = "default"

	// QueryTypeStackTrace is the built-in query type for Client.QueryWorkflow() call. Use this query type to get the call
	// stack of the workflow. The result will be a string encoded in the EncodedValue.
	QueryTypeStackTrace string = "__stack_trace"

	// QueryTypeOpenSessions is the built-in query type for Client.QueryWorkflow() call. Use this query type to get all open
	// sessions in the workflow. The result will be a list of SessionInfo encoded in the EncodedValue.
	QueryTypeOpenSessions string = "__open_sessions"
)
type (
// Client is the client for starting and getting information about workflow executions as well as
// completing activities asynchronously.
Client interface {
	// ExecuteWorkflow starts a workflow execution and returns a WorkflowRun instance and error.
	// The user can use this to start using a function or workflow type name.
	// Either by
	//     ExecuteWorkflow(ctx, options, "workflowTypeName", arg1, arg2, arg3)
	// or
	//     ExecuteWorkflow(ctx, options, workflowExecuteFn, arg1, arg2, arg3)
	// The errors it can return:
	//  - serviceerror.NamespaceNotFound, if namespace does not exist
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//
	// The current timeout resolution implementation is in seconds and uses math.Ceil(d.Seconds()) as the duration. But is
	// subjected to change in the future.
	//
	// WorkflowRun has three methods:
	//  - GetID() string: which returns the workflow ID (which is same as StartWorkflowOptions.ID if provided)
	//  - GetRunID() string: which returns the first started workflow run ID (please see below)
	//  - Get(ctx context.Context, valuePtr interface{}) error: which will fill the workflow
	//    execution result to valuePtr, if workflow execution is a success, or return corresponding
	//    error. This is a blocking API.
	// NOTE: if the started workflow returns ContinueAsNewError during the workflow execution, the
	// returned result of GetRunID() will be the started workflow run ID, not the new run ID caused by ContinueAsNewError,
	// however, Get(ctx context.Context, valuePtr interface{}) will return result from the run which did not return ContinueAsNewError.
	// Say ExecuteWorkflow started a workflow, in its first run, has run ID "run ID 1", and returned ContinueAsNewError,
	// the second run has run ID "run ID 2" and returns some result other than ContinueAsNewError:
	// GetRunID() will always return "run ID 1" and Get(ctx context.Context, valuePtr interface{}) will return the result of the second run.
	// NOTE: DO NOT USE THIS API INSIDE A WORKFLOW, USE workflow.ExecuteChildWorkflow instead
	ExecuteWorkflow(ctx context.Context, options StartWorkflowOptions, workflow interface{}, args ...interface{}) (WorkflowRun, error)

	// GetWorkflow retrieves a workflow execution and returns a WorkflowRun instance
	//  - workflow ID of the workflow.
	//  - runID can be default(empty string). if empty string then it will pick the last running execution of that workflow ID.
	//
	// WorkflowRun has three methods:
	//  - GetID() string: which returns the workflow ID (which is same as StartWorkflowOptions.ID if provided)
	//  - GetRunID() string: which returns the first started workflow run ID (please see below)
	//  - Get(ctx context.Context, valuePtr interface{}) error: which will fill the workflow
	//    execution result to valuePtr, if workflow execution is a success, or return corresponding
	//    error. This is a blocking API.
	// NOTE: if the retrieved workflow returned ContinueAsNewError during the workflow execution, the
	// returned result of GetRunID() will be the retrieved workflow run ID, not the new run ID caused by ContinueAsNewError,
	// however, Get(ctx context.Context, valuePtr interface{}) will return result from the run which did not return ContinueAsNewError.
	GetWorkflow(ctx context.Context, workflowID string, runID string) WorkflowRun

	// SignalWorkflow sends a signal to a workflow in execution
	//  - workflow ID of the workflow.
	//  - runID can be default(empty string). if empty string then it will pick the running execution of that workflow ID.
	//  - signalName name to identify the signal.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	SignalWorkflow(ctx context.Context, workflowID string, runID string, signalName string, arg interface{}) error

	// SignalWithStartWorkflow sends a signal to a running workflow.
	// If the workflow is not running or not found, it starts the workflow and then sends the signal in transaction.
	//  - workflowID, signalName, signalArg are same as SignalWorkflow's parameters
	//  - options, workflow, workflowArgs are same as StartWorkflow's parameters
	//  - the workflowID parameter is used instead of options.ID. If the latter is present, it must match the workflowID.
	// Note: options.WorkflowIDReusePolicy defaults to AllowDuplicate.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	SignalWithStartWorkflow(ctx context.Context, workflowID string, signalName string, signalArg interface{},
		options StartWorkflowOptions, workflow interface{}, workflowArgs ...interface{}) (WorkflowRun, error)

	// CancelWorkflow cancels a workflow in execution
	//  - workflow ID of the workflow.
	//  - runID can be default(empty string). if empty string then it will pick the running execution of that workflow ID.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	CancelWorkflow(ctx context.Context, workflowID string, runID string) error

	// TerminateWorkflow terminates a workflow execution.
	// workflowID is required, other parameters are optional.
	//  - workflow ID of the workflow.
	//  - runID can be default(empty string). if empty string then it will pick the running execution of that workflow ID.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	TerminateWorkflow(ctx context.Context, workflowID string, runID string, reason string, details ...interface{}) error

	// GetWorkflowHistory gets history events of a particular workflow
	//  - workflow ID of the workflow.
	//  - runID can be default(empty string). if empty string then it will pick the last running execution of that workflow ID.
	//  - whether use long poll for tracking new events: when the workflow is running, there can be new events generated during iteration
	//    of HistoryEventIterator, if isLongPoll == true, then iterator will do long poll, tracking new history event, i.e. the iteration
	//    will not be finished until workflow is finished; if isLongPoll == false, then iterator will only return current history events.
	//  - whether return all history events or just the last event, which contains the workflow execution end result
	// Example:
	// To iterate all events,
	//     iter := GetWorkflowHistory(ctx, workflowID, runID, isLongPoll, filterType)
	//     events := []*shared.HistoryEvent{}
	//     for iter.HasNext() {
	//         event, err := iter.Next()
	//         if err != nil {
	//             return err
	//         }
	//         events = append(events, event)
	//     }
	GetWorkflowHistory(ctx context.Context, workflowID string, runID string, isLongPoll bool, filterType enumspb.HistoryEventFilterType) HistoryEventIterator

	// CompleteActivity reports activity completed.
	// activity Execute method can return activity.ErrResultPending to
	// indicate the activity is not completed when its Execute method returns. In that case, this CompleteActivity() method
	// should be called when that activity is completed with the actual result and error. If err is nil, activity task
	// completed event will be reported; if err is CanceledError, activity task canceled event will be reported; otherwise,
	// activity task failed event will be reported.
	// An activity implementation should use GetActivityInfo(ctx).TaskToken function to get task token to use for completion.
	// Example:
	// To complete with a result.
	//     CompleteActivity(token, "Done", nil)
	// To fail the activity with an error.
	//     CompleteActivity(token, nil, temporal.NewApplicationError("reason", details)
	// The activity can fail with below errors ApplicationError, TimeoutError, CanceledError.
	CompleteActivity(ctx context.Context, taskToken []byte, result interface{}, err error) error

	// CompleteActivityByID reports activity completed.
	// Similar to CompleteActivity, but may save user from keeping taskToken info.
	// activity Execute method can return activity.ErrResultPending to
	// indicate the activity is not completed when its Execute method returns. In that case, this CompleteActivityById() method
	// should be called when that activity is completed with the actual result and error. If err is nil, activity task
	// completed event will be reported; if err is CanceledError, activity task canceled event will be reported; otherwise,
	// activity task failed event will be reported.
	// An activity implementation should use activityID provided in ActivityOption to use for completion.
	// namespace name, workflowID, activityID are required, runID is optional.
	// The errors it can return:
	//  - ApplicationError
	//  - TimeoutError
	//  - CanceledError
	CompleteActivityByID(ctx context.Context, namespace, workflowID, runID, activityID string, result interface{}, err error) error

	// RecordActivityHeartbeat records heartbeat for an activity.
	// details - is the progress you want to record along with heart beat for this activity.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	RecordActivityHeartbeat(ctx context.Context, taskToken []byte, details ...interface{}) error

	// RecordActivityHeartbeatByID records heartbeat for an activity.
	// details - is the progress you want to record along with heart beat for this activity.
	// The errors it can return:
	//  - serviceerror.NotFound
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	RecordActivityHeartbeatByID(ctx context.Context, namespace, workflowID, runID, activityID string, details ...interface{}) error

	// ListClosedWorkflow gets closed workflow executions based on request filters
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NamespaceNotFound
	ListClosedWorkflow(ctx context.Context, request *workflowservice.ListClosedWorkflowExecutionsRequest) (*workflowservice.ListClosedWorkflowExecutionsResponse, error)

	// ListOpenWorkflow gets open workflow executions based on request filters
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NamespaceNotFound
	ListOpenWorkflow(ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest) (*workflowservice.ListOpenWorkflowExecutionsResponse, error)

	// ListWorkflow gets workflow executions based on query. The query is basically the SQL WHERE clause,
	// examples:
	//  - "(WorkflowID = 'wid1' or (WorkflowType = 'type2' and WorkflowID = 'wid2'))".
	//  - "CloseTime between '2019-08-27T15:04:05+00:00' and '2019-08-28T15:04:05+00:00'".
	//  - to list only open workflow use "CloseTime is null"
	// Retrieved workflow executions are sorted by StartTime in descending order when list open workflow,
	// and sorted by CloseTime in descending order for other queries.
	// For supported operations on different server versions see [Visibility].
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//
	// [Visibility]: https://docs.temporal.io/visibility
	ListWorkflow(ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest) (*workflowservice.ListWorkflowExecutionsResponse, error)

	// ListArchivedWorkflow gets archived workflow executions based on query. This API will return BadRequest if Temporal
	// cluster or target namespace is not configured for visibility archival or read is not enabled. The query is basically the SQL WHERE clause.
	// However, different visibility archivers have different limitations on the query. Please check the documentation of the visibility archiver used
	// by your namespace to see what kind of queries are accepted and whether retrieved workflow executions are ordered or not.
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	ListArchivedWorkflow(ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest) (*workflowservice.ListArchivedWorkflowExecutionsResponse, error)

	// ScanWorkflow gets workflow executions based on query. The query is basically the SQL WHERE clause
	// (see ListWorkflow for query examples).
	// ScanWorkflow should be used when retrieving large amount of workflows and order is not needed.
	// It will use more resources than ListWorkflow, but will be several times faster
	// when retrieving millions of workflows.
	// For supported operations on different server versions see [Visibility].
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	// [Visibility]: https://docs.temporal.io/visibility
	ScanWorkflow(ctx context.Context, request *workflowservice.ScanWorkflowExecutionsRequest) (*workflowservice.ScanWorkflowExecutionsResponse, error)

	// CountWorkflow gets number of workflow executions based on query. The query is basically the SQL WHERE clause
	// (see ListWorkflow for query examples).
	// For supported operations on different server versions see [Visibility].
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//
	// [Visibility]: https://docs.temporal.io/visibility
	CountWorkflow(ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest) (*workflowservice.CountWorkflowExecutionsResponse, error)

	// GetSearchAttributes returns valid search attributes keys and value types.
	// The search attributes can be used in query of List/Scan/Count APIs. Adding new search attributes requires temporal server
	// to update dynamic config ValidSearchAttributes.
	GetSearchAttributes(ctx context.Context) (*workflowservice.GetSearchAttributesResponse, error)

	// QueryWorkflow queries a given workflow execution and returns the query result synchronously. Parameter workflowID
	// and queryType are required, other parameters are optional. The workflowID and runID (optional) identify the
	// target workflow execution that this query will be sent to. If runID is not specified (empty string), server will
	// use the currently running execution of that workflowID. The queryType specifies the type of query you want to
	// run. By default, temporal supports "__stack_trace" as a standard query type, which will return string value
	// representing the call stack of the target workflow. The target workflow could also setup different query handler
	// to handle custom query types.
	// See comments at workflow.SetQueryHandler(ctx Context, queryType string, handler interface{}) for more details
	// on how to setup query handler within the target workflow.
	//  - workflowID is required.
	//  - runID can be default(empty string). if empty string then it will pick the running execution of that workflow ID.
	//  - queryType is the type of the query.
	//  - args... are the optional query parameters.
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NotFound
	//  - serviceerror.QueryFailed
	QueryWorkflow(ctx context.Context, workflowID string, runID string, queryType string, args ...interface{}) (converter.EncodedValue, error)

	// QueryWorkflowWithOptions queries a given workflow execution and returns the query result synchronously.
	// See QueryWorkflowWithOptionsRequest and QueryWorkflowWithOptionsResponse for more information.
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NotFound
	//  - serviceerror.QueryFailed
	QueryWorkflowWithOptions(ctx context.Context, request *QueryWorkflowWithOptionsRequest) (*QueryWorkflowWithOptionsResponse, error)

	// DescribeWorkflowExecution returns information about the specified workflow execution.
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NotFound
	DescribeWorkflowExecution(ctx context.Context, workflowID, runID string) (*workflowservice.DescribeWorkflowExecutionResponse, error)

	// DescribeTaskQueue returns information about the target taskqueue, right now this API returns the
	// pollers which polled this taskqueue in last few minutes.
	// The errors it can return:
	//  - serviceerror.InvalidArgument
	//  - serviceerror.Internal
	//  - serviceerror.Unavailable
	//  - serviceerror.NotFound
	DescribeTaskQueue(ctx context.Context, taskqueue string, taskqueueType enumspb.TaskQueueType) (*workflowservice.DescribeTaskQueueResponse, error)

	// ResetWorkflowExecution resets an existing workflow execution to WorkflowTaskFinishEventId(exclusive).
	// It will immediately terminate the current execution instance.
	// RequestId is used to deduplicate requests. It will be autogenerated if not set.
	ResetWorkflowExecution(ctx context.Context, request *workflowservice.ResetWorkflowExecutionRequest) (*workflowservice.ResetWorkflowExecutionResponse, error)

	// UpdateWorkerBuildIdCompatibility allows you to update the worker-build-id based version sets for a particular
	// task queue. This is used in conjunction with workers who specify their build id and thus opt into the
	// feature.
	UpdateWorkerBuildIdCompatibility(ctx context.Context, options *UpdateWorkerBuildIdCompatibilityOptions) error

	// GetWorkerBuildIdCompatibility returns the worker-build-id based version sets for a particular task queue.
	GetWorkerBuildIdCompatibility(ctx context.Context, options *GetWorkerBuildIdCompatibilityOptions) (*WorkerBuildIDVersionSets, error)

	// GetWorkerTaskReachability returns which versions are still in use by open or closed workflows.
	GetWorkerTaskReachability(ctx context.Context, options *GetWorkerTaskReachabilityOptions) (*WorkerTaskReachability, error)

	// CheckHealth performs a server health check using the gRPC health check
	// API. If the check fails, an error is returned.
	CheckHealth(ctx context.Context, request *CheckHealthRequest) (*CheckHealthResponse, error)

	// UpdateWorkflow issues an update request to the specified
	// workflow execution and returns the result synchronously. Calling this
	// function is equivalent to calling UpdateWorkflowWithOptions with
	// the same arguments and indicating that the RPC call should wait for
	// completion of the update process.
	// NOTE: Experimental
	UpdateWorkflow(ctx context.Context, workflowID string, workflowRunID string, updateName string, args ...interface{}) (WorkflowUpdateHandle, error)

	// UpdateWorkflowWithOptions issues an update request to the
	// specified workflow execution and returns a handle to the update that
	// is running in parallel with the calling thread. Errors returned
	// from the server will be exposed through the return value of
	// WorkflowExecutionUpdateHandle.Get(). Errors that occur before the
	// update is requested (e.g. if the required workflow ID field is
	// missing from the UpdateWorkflowWithOptionsRequest) are returned
	// directly from this function call.
	// NOTE: Experimental
	UpdateWorkflowWithOptions(ctx context.Context, request *UpdateWorkflowWithOptionsRequest) (WorkflowUpdateHandle, error)

	// GetWorkflowUpdateHandle creates a handle to the referenced update
	// which can be polled for an outcome. Note that runID is optional and
	// if not specified the most recent runID will be used.
	// NOTE: Experimental
	GetWorkflowUpdateHandle(GetWorkflowUpdateHandleOptions) WorkflowUpdateHandle

	// WorkflowService provides access to the underlying gRPC service. This should only be used for advanced use cases
	// that cannot be accomplished via other Client methods. Unlike calls to other Client methods, calls directly to the
	// service are not configured with internal semantics such as automatic retries.
	WorkflowService() workflowservice.WorkflowServiceClient

	// OperatorService creates a new operator service client with the same gRPC connection as this client.
	OperatorService() operatorservice.OperatorServiceClient

	// ScheduleClient creates a new schedule client with the same gRPC connection as this client.
	ScheduleClient() ScheduleClient

	// Close client and clean up underlying resources.
	Close()
}
// ClientOptions are optional parameters for Client creation.
ClientOptions struct {
	// Optional: To set the host:port for this client to connect to.
	// default: localhost:7233
	//
	// This is a gRPC address and therefore can also support a special-formatted address of "<resolver>:///<value>" that
	// will use a registered resolver. By default all hosts returned from the resolver will be used in a round-robin
	// fashion.
	//
	// The "dns" resolver is registered by default. Using a "dns:///" prefixed address will periodically resolve all IPs
	// for DNS address given and round robin amongst them.
	//
	// A custom resolver can be created to provide multiple hosts in other ways. For example, to manually provide
	// multiple IPs to round-robin across, a google.golang.org/grpc/resolver/manual resolver can be created and
	// registered with google.golang.org/grpc/resolver with a custom scheme:
	//     builder := manual.NewBuilderWithScheme("myresolver")
	//     builder.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "2.3.4.5:2345"}}})
	//     resolver.Register(builder)
	//     c, err := client.Dial(client.Options{HostPort: "myresolver:///ignoredvalue"})
	// Other more advanced resolvers can also be registered.
	HostPort string

	// Optional: To set the namespace name for this client to work with.
	// default: default
	Namespace string

	// Optional: Set the credentials for this client.
	Credentials Credentials

	// Optional: Logger framework can use to log.
	// default: default logger provided.
	Logger log.Logger

	// Optional: Metrics handler for reporting metrics.
	// default: no metrics.
	MetricsHandler metrics.Handler

	// Optional: Sets an identity that can be used to track this host for debugging.
	// default: default identity that includes hostname, groupName and process ID.
	Identity string

	// Optional: Sets DataConverter to customize serialization/deserialization of arguments in Temporal
	// default: defaultDataConverter, a combination of google protobuf converter, gogo protobuf converter and json converter
	DataConverter converter.DataConverter

	// Optional: Sets FailureConverter to customize serialization/deserialization of errors.
	// default: temporal.DefaultFailureConverter, does not encode any fields of the error. Use temporal.NewDefaultFailureConverter
	// options to configure or create a custom converter.
	FailureConverter converter.FailureConverter

	// Optional: Sets ContextPropagators that allows users to control the context information passed through a workflow
	// default: nil
	ContextPropagators []ContextPropagator

	// Optional: Sets options for server connection that allow users to control features of connections such as TLS settings.
	// default: no extra options
	ConnectionOptions ConnectionOptions

	// Optional: HeadersProvider will be invoked on every outgoing gRPC request and gives user ability to
	// set custom request headers. This can be used to set auth headers for example.
	HeadersProvider HeadersProvider

	// Optional parameter that is designed to be used *in tests*. It gets invoked last in
	// the gRPC interceptor chain and can be used to induce artificial failures in test scenarios.
	TrafficController TrafficController

	// Interceptors to apply to some calls of the client. Earlier interceptors
	// wrap later interceptors.
	//
	// Any interceptors that also implement Interceptor (meaning they implement
	// WorkerInterceptor in addition to ClientInterceptor) will be used for
	// worker interception as well. When worker interceptors are here and in
	// worker options, the ones here wrap the ones in worker options. The same
	// interceptor should not be set here and in worker options.
	Interceptors []ClientInterceptor
}
// HeadersProvider returns a map of gRPC headers that should be used on every request.
HeadersProvider interface {
	// GetHeaders returns the headers to attach to an outgoing request built from ctx.
	// Returning an error aborts the request before it is sent.
	GetHeaders(ctx context.Context) (map[string]string, error)
}
// TrafficController is getting called in the interceptor chain with API invocation parameters.
// Result is either nil if API call is allowed or an error, in which case the request would be interrupted and
// the error will be propagated back through the interceptor chain.
TrafficController interface {
	// CheckCallAllowed reports whether the given API call may proceed; a non-nil
	// error blocks the call and is returned to the caller.
	CheckCallAllowed(ctx context.Context, method string, req, reply interface{}) error
}
// ConnectionOptions is provided by SDK consumers to control optional connection params.
ConnectionOptions struct {
	// TLS configures connection level security credentials.
	TLS *tls.Config

	// Authority specifies the value to be used as the :authority pseudo-header.
	// This value is only used when TLS is nil.
	Authority string

	// Disable keep alive ping from client to the server.
	DisableKeepAliveCheck bool

	// After a duration of this time if the client doesn't see any activity it
	// pings the server to see if the transport is still alive.
	// If set below 10s, a minimum value of 10s will be used instead.
	// default: 30s
	KeepAliveTime time.Duration

	// After having pinged for keepalive check, the client waits for a duration
	// of Timeout and if no activity is seen even after that the connection is
	// closed.
	// default: 15s
	KeepAliveTimeout time.Duration

	// GetSystemInfoTimeout is the timeout for the RPC made by the
	// client to fetch server capabilities.
	GetSystemInfoTimeout time.Duration

	// If true, when there are no active RPCs, Time and Timeout will be ignored and no
	// keepalive pings will be sent.
	// If false, client sends keepalive pings even with no active RPCs
	// default: false
	DisableKeepAlivePermitWithoutStream bool

	// MaxPayloadSize is a number of bytes that gRPC would allow to travel to and from server. Defaults to 128 MB.
	MaxPayloadSize int

	// Advanced dial options for gRPC connections. These are applied after the internal default dial options are
	// applied. Therefore any dial options here may override internal ones.
	//
	// For gRPC interceptors, internal interceptors such as error handling, metrics, and retrying are done via
	// grpc.WithChainUnaryInterceptor. Therefore to add inner interceptors that are wrapped by those, a
	// grpc.WithChainUnaryInterceptor can be added as an option here. To add a single outer interceptor, a
	// grpc.WithUnaryInterceptor option can be added since grpc.WithUnaryInterceptor is prepended to chains set with
	// grpc.WithChainUnaryInterceptor.
	DialOptions []grpc.DialOption

	// Hidden for use by client overloads.
	disableEagerConnection bool

	// Internal atomic that, when true, will not retry internal errors like
	// other gRPC errors. If not present during service client creation, it will
	// be created as false. This is set to true when server capabilities are
	// fetched.
	excludeInternalFromRetry *atomic.Bool
}
// StartWorkflowOptions configuration parameters for starting a workflow execution.
// The current timeout resolution implementation is in seconds and uses math.Ceil(d.Seconds()) as the duration. But is
// subjected to change in the future.
StartWorkflowOptions struct {
	// ID - The business identifier of the workflow execution.
	// Optional: defaulted to a uuid.
	ID string
	// TaskQueue - The workflow tasks of the workflow are scheduled on the queue with this name.
	// This is also the name of the activity task queue on which activities are scheduled.
	// The workflow author can choose to override this using activity options.
	// Mandatory: No default.
	TaskQueue string
	// WorkflowExecutionTimeout - The timeout for duration of workflow execution.
	// It includes retries and continue as new. Use WorkflowRunTimeout to limit execution time
	// of a single workflow run.
	// The resolution is seconds.
	// Optional: defaulted to unlimited.
	WorkflowExecutionTimeout time.Duration
	// WorkflowRunTimeout - The timeout for duration of a single workflow run.
	// The resolution is seconds.
	// Optional: defaulted to WorkflowExecutionTimeout.
	WorkflowRunTimeout time.Duration
	// WorkflowTaskTimeout - The timeout for processing workflow task from the time the worker
	// pulled this task. If a workflow task is lost, it is retried after this timeout.
	// The resolution is seconds.
	// Optional: defaulted to 10 secs.
	WorkflowTaskTimeout time.Duration
	// WorkflowIDReusePolicy - Whether the server allows reuse of the workflow ID; can be useful
	// for dedupe logic if set to RejectDuplicate.
	// Optional: defaulted to AllowDuplicate.
	WorkflowIDReusePolicy enumspb.WorkflowIdReusePolicy
	// When WorkflowExecutionErrorWhenAlreadyStarted is true, Client.ExecuteWorkflow will return an error if the
	// workflow id has already been used and WorkflowIDReusePolicy would disallow a re-run. If it is set to false,
	// rather than erroring a WorkflowRun instance representing the current or last run will be returned.
	//
	// Optional: defaults to false
	WorkflowExecutionErrorWhenAlreadyStarted bool
	// RetryPolicy - Optional retry policy for workflow. If a retry policy is specified, in case of workflow failure
	// server will start new workflow execution if needed based on the retry policy.
	RetryPolicy *RetryPolicy
	// CronSchedule - Optional cron schedule for workflow. If a cron schedule is specified, the workflow will run
	// as a cron based on the schedule. The scheduling will be based on UTC time. Scheduling of the next run only
	// happens after the current run is completed/failed/timed out. If a RetryPolicy is also supplied, and the workflow
	// failed or timed out, the workflow will be retried based on the retry policy. While the workflow is retrying, it
	// won't schedule its next run. If the next schedule is due while the workflow is running (or retrying), then it
	// will skip that schedule. A cron workflow will not stop until it is terminated or canceled (by returning
	// temporal.CanceledError).
	// The cron spec is as follows:
	// ┌───────────── minute (0 - 59)
	// │ ┌───────────── hour (0 - 23)
	// │ │ ┌───────────── day of the month (1 - 31)
	// │ │ │ ┌───────────── month (1 - 12)
	// │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday)
	// │ │ │ │ │
	// │ │ │ │ │
	// * * * * *
	CronSchedule string
	// Memo - Optional non-indexed info that will be shown in list workflow.
	Memo map[string]interface{}
	// SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs. The key and value type must be registered on Temporal server side.
	// Use GetSearchAttributes API to get valid key and corresponding value type.
	// For supported operations on different server versions see [Visibility].
	//
	// Deprecated: use TypedSearchAttributes instead.
	//
	// [Visibility]: https://docs.temporal.io/visibility
	SearchAttributes map[string]interface{}
	// TypedSearchAttributes - Specifies Search Attributes that will be attached to the Workflow. Search Attributes are
	// additional indexed information attributed to workflow and used for search and visibility. The search attributes
	// can be used in query of List/Scan/Count workflow APIs. The key and its value type must be registered on Temporal
	// server side. For supported operations on different server versions see [Visibility].
	//
	// Optional: default to none.
	//
	// [Visibility]: https://docs.temporal.io/visibility
	TypedSearchAttributes SearchAttributes
	// EnableEagerStart - request eager execution for this workflow, if a local worker is available.
	//
	// WARNING: Eager start does not respect worker versioning. An eagerly started workflow may run on
	// any available local worker even if that worker is not in the default build ID set.
	//
	// NOTE: Experimental
	EnableEagerStart bool
	// StartDelay - Time to wait before dispatching the first workflow task.
	// If the workflow gets a signal before the delay, a workflow task will be dispatched and the rest
	// of the delay will be ignored. A signal from signal with start will not trigger a workflow task.
	// Cannot be set at the same time as a CronSchedule.
	StartDelay time.Duration
}
// RetryPolicy defines the retry policy.
// Note that the history of an activity with a retry policy will be different: the started event will be written down
// into history only when the activity completes or "finally" times out/fails. And the started event only records the
// last started time. Because of that, to check whether an activity has started or not, you cannot rely on history
// events. Instead, you can use CLI to describe the workflow to see the status of the activity:
// tctl --ns <namespace> wf desc -w <wf-id>
RetryPolicy struct {
	// Backoff interval for the first retry. If BackoffCoefficient is 1.0 then it is used for all retries.
	// If not set or set to 0, a default interval of 1s will be used.
	InitialInterval time.Duration
	// Coefficient used to calculate the next retry backoff interval.
	// The next retry interval is previous interval multiplied by this coefficient.
	// Must be 1 or larger. Default is 2.0.
	BackoffCoefficient float64
	// Maximum backoff interval between retries. Exponential backoff leads to interval increase.
	// This value is the cap of the interval. Default is 100x of initial interval.
	MaximumInterval time.Duration
	// Maximum number of attempts. When exceeded the retries stop even if not expired yet.
	// If not set or set to 0, it means unlimited, and relies on activity ScheduleToCloseTimeout to stop.
	MaximumAttempts int32
	// Non-Retriable errors. This is optional. Temporal server will stop retry if error type matches this list.
	// Note:
	// - cancellation is not a failure, so it won't be retried,
	// - only StartToClose or Heartbeat timeouts are retryable.
	NonRetryableErrorTypes []string
}
// NamespaceClient is the client for managing operations on the namespace.
// CLI, tools, ... can use this layer to manage operations on namespaces.
NamespaceClient interface {
	// Register a namespace with the temporal server.
	// The errors it can return:
	// - NamespaceAlreadyExistsError
	// - serviceerror.InvalidArgument
	// - serviceerror.Internal
	// - serviceerror.Unavailable
	Register(ctx context.Context, request *workflowservice.RegisterNamespaceRequest) error
	// Describe a namespace. The namespace has 3 parts of information:
	// NamespaceInfo - which has Name, Status, Description, Owner Email
	// NamespaceConfiguration - configuration like Workflow Execution Retention Period In Days, whether to emit metrics
	// ReplicationConfiguration - replication config like clusters and active cluster name
	// The errors it can return:
	// - serviceerror.NamespaceNotFound
	// - serviceerror.InvalidArgument
	// - serviceerror.Internal
	// - serviceerror.Unavailable
	Describe(ctx context.Context, name string) (*workflowservice.DescribeNamespaceResponse, error)
	// Update a namespace.
	// The errors it can return:
	// - serviceerror.NamespaceNotFound
	// - serviceerror.InvalidArgument
	// - serviceerror.Internal
	// - serviceerror.Unavailable
	Update(ctx context.Context, request *workflowservice.UpdateNamespaceRequest) error
	// Close client and clean up underlying resources.
	Close()
}
)
// Credentials are optional credentials that can be specified in ClientOptions.
type Credentials interface {
	// applyToOptions mutates the client options (e.g. installing TLS config)
	// before the connection is dialed.
	applyToOptions(*ClientOptions) error
	// gRPCInterceptor returns a per-call interceptor for these credentials.
	// Can return nil to have no interceptor.
	gRPCInterceptor() grpc.UnaryClientInterceptor
}
// DialClient creates a client and attempts to connect to the server.
func DialClient(ctx context.Context, options ClientOptions) (Client, error) {
	// Force an eager connection so dial/capability errors surface here.
	options.ConnectionOptions.disableEagerConnection = false
	return newClient(ctx, options, nil)
}
// NewLazyClient creates a client and does not attempt to connect to the server.
func NewLazyClient(options ClientOptions) (Client, error) {
	// Defer the connection; the server is contacted on first use.
	options.ConnectionOptions.disableEagerConnection = true
	return newClient(context.Background(), options, nil)
}
// NewClient creates an instance of a workflow client.
//
// Deprecated: Use DialClient or NewLazyClient instead.
func NewClient(ctx context.Context, options ClientOptions) (Client, error) {
	// Thin wrapper retained for backward compatibility.
	return newClient(ctx, options, nil)
}
// NewClientFromExisting creates a new client using the same connection as the
// existing client.
func NewClientFromExisting(ctx context.Context, existingClient Client, options ClientOptions) (Client, error) {
	// Only clients produced by this package carry the connection we can share.
	existing, ok := existingClient.(*WorkflowClient)
	if !ok || existing == nil {
		return nil, fmt.Errorf("existing client must have been created directly from a client package call")
	}
	return newClient(ctx, options, existing)
}
// newClient creates a WorkflowClient, either dialing a new connection or
// reusing the connection of an existing client. It defaults namespace,
// metrics, host/port, and logger, then optionally loads server capabilities.
func newClient(ctx context.Context, options ClientOptions, existing *WorkflowClient) (Client, error) {
	if options.Namespace == "" {
		options.Namespace = DefaultNamespace
	}
	// Initialize root tags
	if options.MetricsHandler == nil {
		options.MetricsHandler = metrics.NopHandler
	}
	options.MetricsHandler = options.MetricsHandler.WithTags(metrics.RootTags(options.Namespace))
	if options.HostPort == "" {
		options.HostPort = LocalHostPort
	}
	if options.Logger == nil {
		options.Logger = ilog.NewDefaultLogger()
		options.Logger.Info("No logger configured for temporal client. Created default one.")
	}
	// Credentials may mutate options (e.g. install a TLS config) and must run
	// before dialing.
	if options.Credentials != nil {
		if err := options.Credentials.applyToOptions(&options); err != nil {
			return nil, err
		}
	}
	// Dial or use existing connection
	var connection *grpc.ClientConn
	var err error
	if existing == nil {
		options.ConnectionOptions.excludeInternalFromRetry = &atomic.Bool{}
		connection, err = dial(newDialParameters(&options, options.ConnectionOptions.excludeInternalFromRetry))
		if err != nil {
			return nil, err
		}
	} else {
		connection = existing.conn
	}
	client := NewServiceClient(workflowservice.NewWorkflowServiceClient(connection), connection, options)
	// If using existing connection, always load its capabilities and use them for
	// the new connection. Otherwise, only load server capabilities eagerly if not
	// disabled.
	if existing != nil {
		if client.capabilities, err = existing.loadCapabilities(ctx, options.ConnectionOptions.GetSystemInfoTimeout); err != nil {
			return nil, err
		}
		// Share the reference count so the shared connection is only torn down
		// once every derived client is closed.
		client.unclosedClients = existing.unclosedClients
	} else {
		if !options.ConnectionOptions.disableEagerConnection {
			if _, err := client.loadCapabilities(ctx, options.ConnectionOptions.GetSystemInfoTimeout); err != nil {
				// Close to release the freshly dialed connection on failure.
				client.Close()
				return nil, err
			}
		}
		var unclosedClients int32
		client.unclosedClients = &unclosedClients
	}
	atomic.AddInt32(client.unclosedClients, 1)
	return client, nil
}
// newDialParameters assembles the low-level dial configuration derived from
// the user-supplied client options.
func newDialParameters(options *ClientOptions, excludeInternalFromRetry *atomic.Bool) dialParameters {
	// Build the mandatory interceptor chain (error mapping, metrics, retry,
	// headers, traffic control, credentials) up front.
	interceptors := requiredInterceptors(
		options.MetricsHandler,
		options.HeadersProvider,
		options.TrafficController,
		excludeInternalFromRetry,
		options.Credentials,
	)
	return dialParameters{
		UserConnectionOptions: options.ConnectionOptions,
		HostPort:              options.HostPort,
		RequiredInterceptors:  interceptors,
		DefaultServiceConfig:  defaultServiceConfig,
	}
}
// NewServiceClient creates workflow client from workflowservice.WorkflowServiceClient. Must be used internally in unit tests only.
// It defaults namespace, identity, converters, and metrics, collects worker
// interceptors, and wires the outbound client interceptor chain.
func NewServiceClient(workflowServiceClient workflowservice.WorkflowServiceClient, conn *grpc.ClientConn, options ClientOptions) *WorkflowClient {
	// Namespace can be empty in unit tests.
	if options.Namespace == "" {
		options.Namespace = DefaultNamespace
	}
	if options.Identity == "" {
		options.Identity = getWorkerIdentity("")
	}
	if options.DataConverter == nil {
		options.DataConverter = converter.GetDefaultDataConverter()
	}
	if options.FailureConverter == nil {
		options.FailureConverter = GetDefaultFailureConverter()
	}
	if options.MetricsHandler == nil {
		options.MetricsHandler = metrics.NopHandler
	}
	if options.ConnectionOptions.excludeInternalFromRetry == nil {
		options.ConnectionOptions.excludeInternalFromRetry = &atomic.Bool{}
	}
	// Collect set of applicable worker interceptors
	var workerInterceptors []WorkerInterceptor
	for _, interceptor := range options.Interceptors {
		if workerInterceptor, _ := interceptor.(WorkerInterceptor); workerInterceptor != nil {
			workerInterceptors = append(workerInterceptors, workerInterceptor)
		}
	}
	client := &WorkflowClient{
		workflowService:          workflowServiceClient,
		conn:                     conn,
		namespace:                options.Namespace,
		registry:                 newRegistry(),
		metricsHandler:           options.MetricsHandler,
		logger:                   options.Logger,
		identity:                 options.Identity,
		dataConverter:            options.DataConverter,
		failureConverter:         options.FailureConverter,
		contextPropagators:       options.ContextPropagators,
		workerInterceptors:       workerInterceptors,
		excludeInternalFromRetry: options.ConnectionOptions.excludeInternalFromRetry,
		eagerDispatcher: &eagerWorkflowDispatcher{
			workersByTaskQueue: make(map[string][]eagerWorker),
		},
	}
	// Create outbound interceptor by wrapping backwards through chain so that
	// the first configured interceptor becomes the outermost wrapper.
	client.interceptor = &workflowClientInterceptor{client: client}
	for i := len(options.Interceptors) - 1; i >= 0; i-- {
		client.interceptor = options.Interceptors[i].InterceptClient(client.interceptor)
	}
	return client
}
// NewNamespaceClient creates an instance of a namespace client, to manage the
// lifecycle of namespaces.
func NewNamespaceClient(options ClientOptions) (NamespaceClient, error) {
	// Default the endpoint and metrics handler; namespace operations are not
	// namespace-scoped, so tag metrics with the "none" value.
	if options.HostPort == "" {
		options.HostPort = LocalHostPort
	}
	handler := options.MetricsHandler
	if handler == nil {
		handler = metrics.NopHandler
	}
	options.MetricsHandler = handler.WithTags(metrics.RootTags(metrics.NoneTagValue))

	conn, err := dial(newDialParameters(&options, nil))
	if err != nil {
		return nil, err
	}
	return newNamespaceServiceClient(workflowservice.NewWorkflowServiceClient(conn), conn, options), nil
}
// newNamespaceServiceClient wraps a workflow service client and its connection
// in a namespaceClient, defaulting the caller identity when unset.
func newNamespaceServiceClient(workflowServiceClient workflowservice.WorkflowServiceClient, clientConn *grpc.ClientConn, options ClientOptions) NamespaceClient {
	identity := options.Identity
	if identity == "" {
		identity = getWorkerIdentity("")
	}
	return &namespaceClient{
		workflowService:  workflowServiceClient,
		connectionCloser: clientConn,
		metricsHandler:   options.MetricsHandler,
		logger:           options.Logger,
		identity:         identity,
	}
}
// NewValue creates a new converter.EncodedValue which can be used to decode binary data returned by Temporal. For example:
// User had Activity.RecordHeartbeat(ctx, "my-heartbeat") and then got a response from calling Client.DescribeWorkflowExecution.
// The response contains the binary field PendingActivityInfo.HeartbeatDetails,
// which can be decoded by using:
//
//	var result string // This needs to be the same type as the one passed to RecordHeartbeat
//	NewValue(data).Get(&result)
func NewValue(data *commonpb.Payloads) converter.EncodedValue {
	// nil data converter means the default converter is used on Get.
	return newEncodedValue(data, nil)
}
// NewValues creates a new converter.EncodedValues which can be used to decode binary data returned by Temporal. For example:
// User had Activity.RecordHeartbeat(ctx, "my-heartbeat", 123) and then got a response from calling Client.DescribeWorkflowExecution.
// The response contains the binary field PendingActivityInfo.HeartbeatDetails,
// which can be decoded by using:
//
//	var result1 string
//	var result2 int // These need to be the same types as those arguments passed to RecordHeartbeat
//	NewValues(data).Get(&result1, &result2)
func NewValues(data *commonpb.Payloads) converter.EncodedValues {
	// nil data converter means the default converter is used on Get.
	return newEncodedValues(data, nil)
}
// apiKeyCredentials is a callback returning the API key to attach to each request.
type apiKeyCredentials func(context.Context) (string, error)

// NewAPIKeyStaticCredentials creates Credentials that attach a fixed API key
// as a bearer token to every gRPC call.
func NewAPIKeyStaticCredentials(apiKey string) Credentials {
	return NewAPIKeyDynamicCredentials(func(ctx context.Context) (string, error) { return apiKey, nil })
}

// NewAPIKeyDynamicCredentials creates Credentials whose API key is obtained
// per call via the given callback.
func NewAPIKeyDynamicCredentials(apiKeyCallback func(context.Context) (string, error)) Credentials {
	return apiKeyCredentials(apiKeyCallback)
}

// applyToOptions is a no-op; API-key credentials do not alter connection options.
func (apiKeyCredentials) applyToOptions(*ClientOptions) error { return nil }

// gRPCInterceptor returns the unary interceptor that injects the API key.
func (a apiKeyCredentials) gRPCInterceptor() grpc.UnaryClientInterceptor { return a.gRPCIntercept }
// gRPCIntercept fetches the API key and, if one is returned and no
// authorization header is already present, attaches it as a bearer token on
// the outgoing context before invoking the next handler in the chain.
func (a apiKeyCredentials) gRPCIntercept(
	ctx context.Context,
	method string,
	req any,
	reply any,
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption,
) error {
	apiKey, err := a(ctx)
	if err != nil {
		return err
	}
	if apiKey != "" {
		// Only add API key if it doesn't already exist
		md, _ := metadata.FromOutgoingContext(ctx)
		if len(md.Get("authorization")) == 0 {
			ctx = metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer "+apiKey)
		}
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}
// mTLSCredentials holds a client certificate used for mutual TLS.
type mTLSCredentials tls.Certificate

// NewMTLSCredentials creates Credentials that authenticate the client with the
// given mTLS certificate.
func NewMTLSCredentials(certificate tls.Certificate) Credentials {
	return mTLSCredentials(certificate)
}

// applyToOptions installs the certificate on the connection's TLS config,
// creating one if absent. It refuses to stack on top of certificates the
// caller has already configured.
func (m mTLSCredentials) applyToOptions(opts *ClientOptions) error {
	tlsConfig := opts.ConnectionOptions.TLS
	if tlsConfig == nil {
		tlsConfig = &tls.Config{}
		opts.ConnectionOptions.TLS = tlsConfig
	} else if len(tlsConfig.Certificates) != 0 {
		return fmt.Errorf("cannot apply mTLS credentials, certificates already exist on TLS options")
	}
	tlsConfig.Certificates = append(tlsConfig.Certificates, tls.Certificate(m))
	return nil
}

// gRPCInterceptor returns nil; mTLS requires no per-call interceptor.
func (mTLSCredentials) gRPCInterceptor() grpc.UnaryClientInterceptor { return nil }