package client
import (
"fmt"
"io"
"time"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pps"
protostream "go.pedge.io/proto/stream"
)
// NewJob creates a pps.Job.
func NewJob(jobID string) *pps.Job {
return &pps.Job{ID: jobID}
}
const (
// PPSPodNameEnv is the environment variable that a pod can use to
// see its own name. The pod name is made available through the Kubernetes
// downward API.
PPSPodNameEnv = "PPS_POD_NAME"
// PPSLeasePeriod is the amount of time before a lease on a chunk expires.
// That is, a pod needs to send ContinueJob to PPS at least once within this
// period in order to keep owning a chunk. In practice, pods send ContinueJob
// more often than that to account for network latency; see the sketch after
// this const block.
PPSLeasePeriod = 20 * time.Second
)
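
// A worker might keep its chunk lease alive with a loop like the following.
// This is only a hedged sketch: ctx, jobID, chunkID, and renewChunkLease are
// hypothetical stand-ins for whatever state and RPC call actually issue
// ContinueJob to PPS.
//
//	ticker := time.NewTicker(PPSLeasePeriod / 2) // renew well before expiry
//	defer ticker.Stop()
//	for range ticker.C {
//		if err := renewChunkLease(ctx, jobID, chunkID); err != nil {
//			return err // lease lost or PPS unreachable
//		}
//	}
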
var (
// MapMethod defines a pps.Method for mapper pipelines.
MapMethod = &pps.Method{
Partition: pps.Partition_BLOCK,
Incremental: pps.Incremental_DIFF,
}
// ReduceMethod defines a pps.Method for non-incremental reducer pipelines.
ReduceMethod = &pps.Method{
Partition: pps.Partition_FILE,
Incremental: pps.Incremental_NONE,
}
// IncrementalReduceMethod defines a pps.Method for incremental reducer pipelines.
IncrementalReduceMethod = &pps.Method{
Partition: pps.Partition_FILE,
Incremental: pps.Incremental_DIFF,
}
// GlobalMethod defines a pps.Method for non-incremental, non-partitioned pipelines.
GlobalMethod = &pps.Method{
Partition: pps.Partition_REPO,
Incremental: pps.Incremental_NONE,
}
// DefaultMethod defines the default pps.Method for a pipeline.
DefaultMethod = MapMethod
// MethodAliasMap maps a string to a pps.Method for JSON decoding; a lookup
// sketch follows this var block.
MethodAliasMap = map[string]*pps.Method{
"map": MapMethod,
"reduce": ReduceMethod,
"incremental_reduce": IncrementalReduceMethod,
"global": GlobalMethod,
}
// ReservedRepoNames defines a set of reserved repo names for internal use.
ReservedRepoNames = map[string]bool{
"out": true,
"prev": true,
}
)
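
// Resolving a method alias taken from a decoded pipeline spec might look like
// the following sketch (the alias string and the fallback to DefaultMethod are
// assumptions for illustration, not behavior guaranteed by this package):
//
//	method, ok := MethodAliasMap["incremental_reduce"]
//	if !ok {
//		method = DefaultMethod
//	}
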
// NewJobInput creates a pps.JobInput.
func NewJobInput(repoName string, commitID string, method *pps.Method) *pps.JobInput {
return &pps.JobInput{
Commit: NewCommit(repoName, commitID),
Method: method,
}
}
// NewPipeline creates a pps.Pipeline.
func NewPipeline(pipelineName string) *pps.Pipeline {
return &pps.Pipeline{Name: pipelineName}
}
// NewPipelineInput creates a new pps.PipelineInput.
func NewPipelineInput(repoName string, method *pps.Method) *pps.PipelineInput {
return &pps.PipelineInput{
Repo: NewRepo(repoName),
Method: method,
}
}
// CreateJob creates and runs a job in PPS.
// image is the Docker image to run the job in.
// cmd is the command passed to the Docker run invocation.
// NOTE: as with Docker, cmd is not run inside a shell, which means that things
// like wildcard globbing (*), pipes (|) and file redirects (> and >>) will not
// work. To get that behavior, make your command a shell of your choice and
// pass a shell script to stdin.
// stdin is a slice of lines that are sent to your command on stdin. Lines need
// not end in newline characters.
// parallelismSpec controls how many copies of your container run in parallel;
// leaving it unset lets PPS choose a parallelism based on available resources.
// inputs specifies a set of Commits that will be visible to the job during runtime.
// parentJobID specifies a job to use as a parent; it may be left empty, in
// which case there is no parent job. If it is not empty, your job will use the
// parent job's output commit as the parent of its output commit.
// internalPort and externalPort optionally expose the job as a service;
// externalPort may only be set if internalPort is also set.
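//
// A rough usage sketch (the image, repo name, commit ID, paths, and shell
// command below are hypothetical, and passing a nil parallelismSpec to let
// PPS choose is an assumption):
//
//	job, err := c.CreateJob(
//		"ubuntu:14.04",
//		[]string{"sh"},
//		[]string{"cp /pfs/data/* /pfs/out/"},
//		nil,
//		[]*pps.JobInput{NewJobInput("data", "<commit-id>", MapMethod)},
//		"", // no parent job
//		0,  // no internal port
//		0,  // no external port
//	)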
func (c APIClient) CreateJob(
image string,
cmd []string,
stdin []string,
parallelismSpec *pps.ParallelismSpec,
inputs []*pps.JobInput,
parentJobID string,
internalPort int32,
externalPort int32,
) (*pps.Job, error) {
var parentJob *pps.Job
var service *pps.Service
if internalPort != 0 {
service = &pps.Service{
InternalPort: internalPort,
}
}
if externalPort != 0 {
if internalPort == 0 {
return nil, fmt.Errorf("external port specified without internal port")
}
service.ExternalPort = externalPort
}
if parentJobID != "" {
parentJob = NewJob(parentJobID)
}
job, err := c.PpsAPIClient.CreateJob(
c.ctx(),
&pps.CreateJobRequest{
Transform: &pps.Transform{
Image: image,
Cmd: cmd,
Stdin: stdin,
},
ParallelismSpec: parallelismSpec,
Inputs: inputs,
ParentJob: parentJob,
Service: service,
},
)
return job, sanitizeErr(err)
}
// InspectJob returns info about a specific job.
// blockState will cause the call to block until the job reaches a terminal state (failure or success).
func (c APIClient) InspectJob(jobID string, blockState bool) (*pps.JobInfo, error) {
jobInfo, err := c.PpsAPIClient.InspectJob(
c.ctx(),
&pps.InspectJobRequest{
Job: NewJob(jobID),
BlockState: blockState,
})
return jobInfo, sanitizeErr(err)
}
// ListJob returns info about all jobs.
// If pipelineName is non-empty then only jobs that were started by the named pipeline will be returned.
// If inputCommit is non-nil then only jobs which took the specified commits as inputs will be returned.
// The order of the inputCommits doesn't matter.
func (c APIClient) ListJob(pipelineName string, inputCommit []*pfs.Commit) ([]*pps.JobInfo, error) {
var pipeline *pps.Pipeline
if pipelineName != "" {
pipeline = NewPipeline(pipelineName)
}
jobInfos, err := c.PpsAPIClient.ListJob(
c.ctx(),
&pps.ListJobRequest{
Pipeline: pipeline,
InputCommit: inputCommit,
})
if err != nil {
return nil, sanitizeErr(err)
}
return jobInfos.JobInfo, nil
}
// DeleteJob deletes a job along with its output Repo.
func (c APIClient) DeleteJob(jobID string) error {
_, err := c.PpsAPIClient.DeleteJob(
c.ctx(),
&pps.DeleteJobRequest{
Job: NewJob(jobID),
},
)
return sanitizeErr(err)
}
// GetLogs gets logs from a job (this includes both stdout and stderr).
func (c APIClient) GetLogs(
jobID string,
writer io.Writer,
) error {
getLogsClient, err := c.PpsAPIClient.GetLogs(
c.ctx(),
&pps.GetLogsRequest{
Job: NewJob(jobID),
},
)
if err != nil {
return sanitizeErr(err)
}
return sanitizeErr(protostream.WriteFromStreamingBytesClient(getLogsClient, writer))
}
// CreatePipeline creates a new pipeline. Pipelines are the main computation
// object in PPS: they create a flow of data from a set of input Repos to an
// output Repo (which has the same name as the pipeline). Whenever new data is
// committed to one of the input repos, the pipeline will create jobs to bring
// the output Repo up to date.
// image is the Docker image to run the jobs in.
// cmd is the command passed to the Docker run invocation.
// NOTE: as with Docker, cmd is not run inside a shell, which means that things
// like wildcard globbing (*), pipes (|) and file redirects (> and >>) will not
// work. To get that behavior, make your command a shell of your choice and
// pass a shell script to stdin.
// stdin is a slice of lines that are sent to your command on stdin. Lines need
// not end in newline characters.
// parallelismSpec controls how many copies of your container run in parallel;
// leaving it unset lets PPS choose a parallelism based on available resources.
// inputs specifies a set of Repos that will be visible to the jobs during runtime.
// Commits to these repos will cause the pipeline to create new jobs to process them.
// update indicates that you want to update an existing pipeline.
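//
// A rough usage sketch (the pipeline name, image, repo name, paths, and shell
// command below are hypothetical, and passing a nil parallelismSpec to let
// PPS choose is an assumption):
//
//	err := c.CreatePipeline(
//		"wordcount",
//		"ubuntu:14.04",
//		[]string{"sh"},
//		[]string{"wc -w /pfs/data/* > /pfs/out/counts"},
//		nil,
//		[]*pps.PipelineInput{NewPipelineInput("data", MapMethod)},
//		false, // not updating an existing pipeline
//	)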
func (c APIClient) CreatePipeline(
name string,
image string,
cmd []string,
stdin []string,
parallelismSpec *pps.ParallelismSpec,
inputs []*pps.PipelineInput,
update bool,
) error {
_, err := c.PpsAPIClient.CreatePipeline(
c.ctx(),
&pps.CreatePipelineRequest{
Pipeline: NewPipeline(name),
Transform: &pps.Transform{
Image: image,
Cmd: cmd,
Stdin: stdin,
},
ParallelismSpec: parallelismSpec,
Inputs: inputs,
Update: update,
},
)
return sanitizeErr(err)
}
// InspectPipeline returns info about a specific pipeline.
func (c APIClient) InspectPipeline(pipelineName string) (*pps.PipelineInfo, error) {
pipelineInfo, err := c.PpsAPIClient.InspectPipeline(
c.ctx(),
&pps.InspectPipelineRequest{
Pipeline: NewPipeline(pipelineName),
},
)
return pipelineInfo, sanitizeErr(err)
}
// ListPipeline returns info about all pipelines.
func (c APIClient) ListPipeline() ([]*pps.PipelineInfo, error) {
pipelineInfos, err := c.PpsAPIClient.ListPipeline(
c.ctx(),
&pps.ListPipelineRequest{},
)
if err != nil {
return nil, sanitizeErr(err)
}
return pipelineInfos.PipelineInfo, nil
}
// DeletePipeline deletes a pipeline along with its output Repo.
func (c APIClient) DeletePipeline(name string) error {
_, err := c.PpsAPIClient.DeletePipeline(
c.ctx(),
&pps.DeletePipelineRequest{
Pipeline: NewPipeline(name),
},
)
return sanitizeErr(err)
}
// StartPipeline restarts a stopped pipeline.
func (c APIClient) StartPipeline(name string) error {
_, err := c.PpsAPIClient.StartPipeline(
c.ctx(),
&pps.StartPipelineRequest{
Pipeline: NewPipeline(name),
},
)
return sanitizeErr(err)
}
// StopPipeline prevents a pipeline from processing new input; it can be
// restarted with StartPipeline.
func (c APIClient) StopPipeline(name string) error {
_, err := c.PpsAPIClient.StopPipeline(
c.ctx(),
&pps.StopPipelineRequest{
Pipeline: NewPipeline(name),
},
)
return sanitizeErr(err)
}