// hybrid.go
package server

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/fnproject/fn/api"
	"github.com/fnproject/fn/api/common"
	"github.com/fnproject/fn/api/models"
	"github.com/gin-gonic/gin"
)
func (s *Server) handleRunnerEnqueue(c *gin.Context) {
	ctx := c.Request.Context()

	// TODO make this a list & let Push take a list!
	var call models.Call
	err := c.BindJSON(&call)
	if err != nil {
		if models.IsAPIError(err) {
			handleErrorResponse(c, err)
		} else {
			handleErrorResponse(c, models.ErrInvalidJSON)
		}
		return
	}

	// XXX (reed): validate the call struct

	// TODO/NOTE: we need to decide on behavior if this endpoint is called
	// multiple times for the same call. As it stands, there will be N messages
	// for 1 call, which only clogs up the mq with spurious messages (possibly
	// useful if things get wedged, but that's not the point); the task will
	// still run only once, by the first runner to set it to status=running. We
	// may well want to push the message only if inserting the call succeeds,
	// but then a failed push leaves a call in queued state with no message
	// (much harder to handle). Having this endpoint be retry safe seems ideal,
	// and runners likely won't spam it, so the current behavior is okay [but
	// beware of implications].
	call.Status = "queued"
	_, err = s.mq.Push(ctx, &call)
	if err != nil {
		handleErrorResponse(c, err)
		return
	}

	// TODO once update call is hooked up, do this:
	// At this point, the message is on the queue and could be picked up by a
	// runner and enter the 'running' state before we can insert it in the db
	// in 'queued' state. We can ignore any error inserting into the db here;
	// Start will ensure the call exists in the db in 'running' state.
	// s.datastore.InsertCall(ctx, &call)

	c.String(http.StatusNoContent, "")
}
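
// For illustration only: the "push only after a successful insert" alternative
// weighed in the comment above, sketched with the same names as the handler.
// The failure mode noted there is visible here: if Push fails after InsertCall
// succeeds, the call sits in the db in 'queued' state with no message backing it.
//
// call.Status = "queued"
// if err := s.datastore.InsertCall(ctx, &call); err != nil {
// 	handleErrorResponse(c, err)
// 	return
// }
// if _, err := s.mq.Push(ctx, &call); err != nil {
// 	// the call is now queued in the db but has no mq message behind it
// 	handleErrorResponse(c, err)
// 	return
// }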
func (s *Server) handleRunnerDequeue(c *gin.Context) {
	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
	defer cancel()

	var resp struct {
		M []*models.Call `json:"calls"`
	}
	var m [1]*models.Call // avoid alloc
	resp.M = m[:0]

	// long poll until ctx expires / we find a message
	var b common.Backoff
	for {
		call, err := s.mq.Reserve(ctx)
		if err != nil {
			handleErrorResponse(c, err)
			return
		}
		if call != nil {
			resp.M = append(resp.M, call)
			c.JSON(http.StatusOK, resp)
			return
		}

		b.Sleep(ctx)

		select {
		case <-ctx.Done():
			c.JSON(http.StatusOK, resp) // TODO assert this returns `[]` & not `null`
			return
		default: // poll until we find a message
		}
	}
}
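
// For illustration only: a minimal sketch of what common.Backoff's Sleep is
// assumed to do in the loop above (zero value usable, waits progressively
// longer between polls, returns early once ctx is done). This is an assumption
// about its shape, not the actual implementation in fn/api/common.
//
// type backoff struct{ d time.Duration }
//
// func (b *backoff) Sleep(ctx context.Context) {
// 	if b.d == 0 {
// 		b.d = 10 * time.Millisecond // hypothetical initial delay
// 	}
// 	t := time.NewTimer(b.d)
// 	defer t.Stop()
// 	select {
// 	case <-t.C:
// 	case <-ctx.Done():
// 	}
// 	b.d *= 2
// 	if b.d > time.Second { // hypothetical cap
// 		b.d = time.Second
// 	}
// }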
func (s *Server) handleRunnerStart(c *gin.Context) {
	ctx := c.Request.Context()

	var call models.Call
	err := c.BindJSON(&call)
	if err != nil {
		if models.IsAPIError(err) {
			handleErrorResponse(c, err)
		} else {
			handleErrorResponse(c, models.ErrInvalidJSON)
		}
		return
	}

	// TODO validate call?

	// TODO hook up update. We really just want it to set status to running iff
	// status=queued, but this must be done in a txn in Update with the behavior:
	//   queued->running
	//   running->error   (returning error)
	//   error->error     (returning error)
	//   success->success (returning error)
	//   timeout->timeout (returning error)
	//
	// There is nuance for running->error, as in theory it could be the correct
	// machine retrying, and we risk never running the task. This needs further
	// thought, but marking it as error covers our tracks, since if the db is
	// down we can't run anything anyway (treat it as such).

	// TODO do this client side and validate it here?
	//call.Status = "running"
	//call.StartedAt = common.DateTime(time.Now())
	//err := s.datastore.UpdateCall(c.Request.Context(), &call)
	//if err != nil {
	//if err == InvalidStatusChange {
	//// TODO we could either let UpdateCall handle setting to error or do it
	//// here explicitly

	// TODO after messaging semantics change, only delete the message if the
	// status change fails because the call already ran.
	if err := s.mq.Delete(ctx, &call); err != nil { // TODO change this to take some string(s), not a whole call
		handleErrorResponse(c, err)
		return
	}

	//}
	//handleV1ErrorResponse(c, err)
	//return
	//}

	c.String(http.StatusNoContent, "")
}
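
// For illustration only: the atomic queued->running transition described in
// the comment above, as a hypothetical datastore helper (names, signature, and
// schema are assumptions, not fn's actual API). The point is the
// compare-and-swap: only the first runner to move a call out of 'queued' wins,
// and every other transition in the table returns an error.
//
// func transitionCall(ctx context.Context, db *sql.DB, id, from, to string) error {
// 	res, err := db.ExecContext(ctx,
// 		`UPDATE calls SET status = ? WHERE id = ? AND status = ?`, to, id, from)
// 	if err != nil {
// 		return err
// 	}
// 	if n, _ := res.RowsAffected(); n == 0 {
// 		return errors.New("invalid status change") // e.g. call already running
// 	}
// 	return nil
// }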
func (s *Server) handleRunnerFinish(c *gin.Context) {
	ctx := c.Request.Context()

	var body struct {
		Call models.Call `json:"call"`
		Log  string      `json:"log"` // TODO use multipart so that we don't have to serialize/deserialize this? measure..
	}
	err := c.BindJSON(&body)
	if err != nil {
		if models.IsAPIError(err) {
			handleErrorResponse(c, err)
		} else {
			handleErrorResponse(c, models.ErrInvalidJSON)
		}
		return
	}

	// TODO validate?
	call := body.Call

	// TODO this needs UpdateCall functionality to work for async, and should
	// only work for running->error|timeout|success.
	// TODO all async will fail here :( all sync will work fine :) -- *feeling conflicted*
	if err := s.logstore.InsertCall(ctx, &call); err != nil {
		common.Logger(ctx).WithError(err).Error("error inserting call into datastore")
		// note: not returning err here since the job could have already finished successfully
	}

	if err := s.logstore.InsertLog(ctx, &call, strings.NewReader(body.Log)); err != nil {
		common.Logger(ctx).WithError(err).Error("error uploading log")
		// note: not returning err here since the job could have already finished successfully
	}

	// TODO open this up after we change messaging semantics.
	// TODO we don't know whether a call is async or sync. We likely need an
	// additional arg in params for a message id and can detect based on that.
	// For now, delete messages for both sync and async, even though sync
	// doesn't have any (ignore the error).
	//if err := s.mq.Delete(ctx, &call); err != nil { // TODO change this to take some string(s), not a whole call
	//common.Logger(ctx).WithError(err).Error("error deleting mq msg")
	//// note: not returning err here since the job could have already finished successfully
	//}

	c.String(http.StatusNoContent, "")
}
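
// For illustration only: the multipart alternative from the TODO on the Log
// field above, with a hypothetical part layout (a "call" JSON part followed by
// a "log" part). The win is that the log part's io.Reader can be handed to
// s.logstore.InsertLog directly instead of round-tripping through a JSON
// string. mq/logstore names match the handler; everything else is an assumption.
//
// mr, err := c.Request.MultipartReader()
// if err != nil {
// 	handleErrorResponse(c, err)
// 	return
// }
// callPart, _ := mr.NextPart() // first part: JSON-encoded models.Call
// var call models.Call
// if err := json.NewDecoder(callPart).Decode(&call); err != nil {
// 	handleErrorResponse(c, models.ErrInvalidJSON)
// 	return
// }
// logPart, _ := mr.NextPart() // second part: raw log bytes, streamed
// if err := s.logstore.InsertLog(ctx, &call, logPart); err != nil {
// 	common.Logger(ctx).WithError(err).Error("error uploading log")
// }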
func (s *Server) handleRunnerGetTriggerBySource(c *gin.Context) {
	ctx := c.Request.Context()

	appID := c.MustGet(api.AppID).(string)

	triggerType := c.Param(api.ParamTriggerType)
	if triggerType == "" {
		handleErrorResponse(c, errors.New("no trigger type in request"))
		return
	}

	triggerSource := strings.TrimPrefix(c.Param(api.ParamTriggerSource), "/")

	trigger, err := s.datastore.GetTriggerBySource(ctx, appID, triggerType, triggerSource)
	if err != nil {
		handleErrorResponse(c, err)
		return
	}

	// It's not clear that we really need to annotate the trigger here, but
	// let's do it just in case.
	app, err := s.datastore.GetAppByID(ctx, trigger.AppID)
	if err != nil {
		handleErrorResponse(c, fmt.Errorf("unexpected error - trigger app not available: %s", err))
		return
	}

	s.triggerAnnotator.AnnotateTrigger(c, app, trigger)

	c.JSON(http.StatusOK, trigger)
}
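
// For illustration: assuming a runner-facing route shaped like
// /apps/:app_id/triggers/:trigger_type/*trigger_source (the exact path is an
// assumption, not taken from the router), a request such as
//
//   GET .../triggers/http/some/func
//
// resolves here with triggerType="http" and triggerSource="some/func" (the
// leading "/" from the wildcard param is trimmed above), and responds with the
// annotated trigger as JSON.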