// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package matching

import (
	"context"
	"sync"
	"time"

	"github.com/uber-go/tally"

	"github.com/uber/cadence/.gen/go/health"
	m "github.com/uber/cadence/.gen/go/matching"
	"github.com/uber/cadence/.gen/go/matching/matchingserviceserver"
	gen "github.com/uber/cadence/.gen/go/shared"
	"github.com/uber/cadence/common"
	"github.com/uber/cadence/common/cache"
	"github.com/uber/cadence/common/log"
	"github.com/uber/cadence/common/metrics"
	"github.com/uber/cadence/common/persistence"
	"github.com/uber/cadence/common/quotas"
	"github.com/uber/cadence/common/service"
)
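
// Compile-time check that Handler satisfies the generated matching service Thrift interface.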
var _ matchingserviceserver.Interface = (*Handler)(nil)

// Handler - Thrift handler interface for the matching service
type Handler struct {
	taskPersistence persistence.TaskManager
	metadataMgr     persistence.MetadataManager
	engine          Engine
	config          *Config
	metricsClient   metrics.Client
	startWG         sync.WaitGroup
	domainCache     cache.DomainCache
	rateLimiter     quotas.Limiter
	service.Service
}
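
// errMatchingHostThrottle is returned to callers when this host's RPS limit has been exceeded.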
var (
	errMatchingHostThrottle = &gen.ServiceBusyError{Message: "Matching host rps exceeded"}
)

// NewHandler creates a Thrift handler for the matching service
func NewHandler(sVice service.Service, config *Config, taskPersistence persistence.TaskManager, metadataMgr persistence.MetadataManager) *Handler {
	handler := &Handler{
		Service:         sVice,
		taskPersistence: taskPersistence,
		metadataMgr:     metadataMgr,
		config:          config,
		rateLimiter: quotas.NewDynamicRateLimiter(func() float64 {
			return float64(config.RPS())
		}),
	}
	// prevent us from trying to serve requests before the matching engine is started and ready
	handler.startWG.Add(1)
	return handler
}

// RegisterHandler registers this handler with the dispatcher; it must be called before Start()
func (h *Handler) RegisterHandler() {
	h.Service.GetDispatcher().Register(matchingserviceserver.New(h))
}

// Start starts the handler
func (h *Handler) Start() error {
	h.Service.Start()
	h.domainCache = cache.NewDomainCache(h.metadataMgr, h.GetClusterMetadata(), h.GetMetricsClient(), h.GetLogger())
	h.domainCache.Start()
	h.metricsClient = h.Service.GetMetricsClient()
	client, err := h.Service.GetClientBean().GetMatchingClient(h.domainCache.GetDomainName)
	if err != nil {
		return err
	}
	h.engine = NewEngine(
		h.taskPersistence,
		h.GetClientBean().GetHistoryClient(),
		client,
		h.config,
		h.Service.GetLogger(),
		h.Service.GetMetricsClient(),
		h.domainCache,
	)
	h.startWG.Done()
	return nil
}

// Stop stops the handler
func (h *Handler) Stop() {
	h.engine.Stop()
	h.domainCache.Stop()
	h.taskPersistence.Close()
	h.metadataMgr.Close()
	h.Service.Stop()
}

// Health is for health check
func (h *Handler) Health(ctx context.Context) (*health.HealthStatus, error) {
	h.startWG.Wait()
	h.GetLogger().Debug("Matching service health check endpoint reached.")
	hs := &health.HealthStatus{Ok: true, Msg: common.StringPtr("matching good")}
	return hs, nil
}

// startRequestProfile initiates recording of request metrics
func (h *Handler) startRequestProfile(api string, scope int) tally.Stopwatch {
	h.startWG.Wait()
	sw := h.metricsClient.StartTimer(scope, metrics.CadenceLatency)
	h.metricsClient.IncCounter(scope, metrics.CadenceRequests)
	return sw
}

// AddActivityTask - adds an activity task.
func (h *Handler) AddActivityTask(ctx context.Context, addRequest *m.AddActivityTaskRequest) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	startT := time.Now()
	scope := metrics.MatchingAddActivityTaskScope
	sw := h.startRequestProfile("AddActivityTask", scope)
	defer sw.Stop()

	if addRequest.GetForwardedFrom() != "" {
		h.metricsClient.IncCounter(scope, metrics.ForwardedCounter)
	}

	if ok := h.rateLimiter.Allow(); !ok {
		return h.handleErr(errMatchingHostThrottle, scope)
	}

	syncMatch, err := h.engine.AddActivityTask(ctx, addRequest)
	if syncMatch {
		h.metricsClient.RecordTimer(scope, metrics.SyncMatchLatency, time.Since(startT))
	}
	return h.handleErr(err, scope)
}

// AddDecisionTask - adds a decision task.
func (h *Handler) AddDecisionTask(ctx context.Context, addRequest *m.AddDecisionTaskRequest) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	startT := time.Now()
	scope := metrics.MatchingAddDecisionTaskScope
	sw := h.startRequestProfile("AddDecisionTask", scope)
	defer sw.Stop()

	if addRequest.GetForwardedFrom() != "" {
		h.metricsClient.IncCounter(scope, metrics.ForwardedCounter)
	}

	if ok := h.rateLimiter.Allow(); !ok {
		return h.handleErr(errMatchingHostThrottle, scope)
	}

	syncMatch, err := h.engine.AddDecisionTask(ctx, addRequest)
	if syncMatch {
		h.metricsClient.RecordTimer(scope, metrics.SyncMatchLatency, time.Since(startT))
	}
	return h.handleErr(err, scope)
}

// PollForActivityTask - long poll for an activity task.
func (h *Handler) PollForActivityTask(ctx context.Context,
	pollRequest *m.PollForActivityTaskRequest) (resp *gen.PollForActivityTaskResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingPollForActivityTaskScope
	sw := h.startRequestProfile("PollForActivityTask", scope)
	defer sw.Stop()

	if pollRequest.GetForwardedFrom() != "" {
		h.metricsClient.IncCounter(scope, metrics.ForwardedCounter)
	}

	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.handleErr(errMatchingHostThrottle, scope)
	}

	if _, err := common.ValidateLongPollContextTimeoutIsSet(
		ctx,
		"PollForActivityTask",
		h.Service.GetThrottledLogger(),
	); err != nil {
		return nil, h.handleErr(err, scope)
	}

	response, err := h.engine.PollForActivityTask(ctx, pollRequest)
	return response, h.handleErr(err, scope)
}

// PollForDecisionTask - long poll for a decision task.
func (h *Handler) PollForDecisionTask(ctx context.Context,
	pollRequest *m.PollForDecisionTaskRequest) (resp *m.PollForDecisionTaskResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingPollForDecisionTaskScope
	sw := h.startRequestProfile("PollForDecisionTask", scope)
	defer sw.Stop()

	if pollRequest.GetForwardedFrom() != "" {
		h.metricsClient.IncCounter(scope, metrics.ForwardedCounter)
	}

	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.handleErr(errMatchingHostThrottle, scope)
	}

	if _, err := common.ValidateLongPollContextTimeoutIsSet(
		ctx,
		"PollForDecisionTask",
		h.Service.GetThrottledLogger(),
	); err != nil {
		return nil, h.handleErr(err, scope)
	}

	response, err := h.engine.PollForDecisionTask(ctx, pollRequest)
	return response, h.handleErr(err, scope)
}

// QueryWorkflow queries a given workflow synchronously and returns the query result.
func (h *Handler) QueryWorkflow(ctx context.Context,
	queryRequest *m.QueryWorkflowRequest) (resp *gen.QueryWorkflowResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingQueryWorkflowScope
	sw := h.startRequestProfile("QueryWorkflow", scope)
	defer sw.Stop()

	if queryRequest.GetForwardedFrom() != "" {
		h.metricsClient.IncCounter(scope, metrics.ForwardedCounter)
	}

	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.handleErr(errMatchingHostThrottle, scope)
	}

	response, err := h.engine.QueryWorkflow(ctx, queryRequest)
	return response, h.handleErr(err, scope)
}

// RespondQueryTaskCompleted responds to a completed query task
func (h *Handler) RespondQueryTaskCompleted(ctx context.Context, request *m.RespondQueryTaskCompletedRequest) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingRespondQueryTaskCompletedScope
	sw := h.startRequestProfile("RespondQueryTaskCompleted", scope)
	defer sw.Stop()

	// Count the request against the RPS limit, but still accept it even if the limit is exceeded
	h.rateLimiter.Allow()

	err := h.engine.RespondQueryTaskCompleted(ctx, request)
	return h.handleErr(err, scope)
}

// CancelOutstandingPoll is used to cancel outstanding pollers
func (h *Handler) CancelOutstandingPoll(ctx context.Context,
	request *m.CancelOutstandingPollRequest) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingCancelOutstandingPollScope
	sw := h.startRequestProfile("CancelOutstandingPoll", scope)
	defer sw.Stop()

	// Count the request against the RPS limit, but still accept it even if the limit is exceeded
	h.rateLimiter.Allow()

	err := h.engine.CancelOutstandingPoll(ctx, request)
	return h.handleErr(err, scope)
}

// DescribeTaskList returns information about the target tasklist. Right now this API returns the
// pollers which polled this tasklist in the last few minutes. If the includeTaskListStatus field is true,
// it will also return the status of the tasklist's ackManager (readLevel, ackLevel, backlogCountHint and taskIDBlock).
func (h *Handler) DescribeTaskList(ctx context.Context, request *m.DescribeTaskListRequest) (resp *gen.DescribeTaskListResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	scope := metrics.MatchingDescribeTaskListScope
	sw := h.startRequestProfile("DescribeTaskList", scope)
	defer sw.Stop()

	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.handleErr(errMatchingHostThrottle, scope)
	}

	response, err := h.engine.DescribeTaskList(ctx, request)
	return response, h.handleErr(err, scope)
}
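
// handleErr emits a per-error-type metric for the given scope and returns known Cadence error
// types unchanged; any other error is counted as a failure and wrapped in an InternalServiceError
// so callers never receive an unclassified error.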
func (h *Handler) handleErr(err error, scope int) error {
	if err == nil {
		return nil
	}

	switch err.(type) {
	case *gen.InternalServiceError:
		h.metricsClient.IncCounter(scope, metrics.CadenceFailures)
		return err
	case *gen.BadRequestError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrBadRequestCounter)
		return err
	case *gen.EntityNotExistsError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrEntityNotExistsCounter)
		return err
	case *gen.WorkflowExecutionAlreadyStartedError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrExecutionAlreadyStartedCounter)
		return err
	case *gen.DomainAlreadyExistsError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrDomainAlreadyExistsCounter)
		return err
	case *gen.QueryFailedError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrQueryFailedCounter)
		return err
	case *gen.LimitExceededError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrLimitExceededCounter)
		return err
	case *gen.ServiceBusyError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrServiceBusyCounter)
		return err
	case *gen.DomainNotActiveError:
		h.metricsClient.IncCounter(scope, metrics.CadenceErrDomainNotActiveCounter)
		return err
	default:
		h.metricsClient.IncCounter(scope, metrics.CadenceFailures)
		return &gen.InternalServiceError{Message: err.Error()}
	}
}