/
utils.go
404 lines (368 loc) · 12.1 KB
/
utils.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
package remote
import (
"context"
"encoding/hex"
"fmt"
"os"
"path"
"strings"
"time"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/bazelbuild/remote-apis/build/bazel/semver"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
rpcstatus "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/thought-machine/please/src/core"
)
// sum calculates a checksum for a byte slice using the state's configured path hasher.
func (c *Client) sum(b []byte) []byte {
	hasher := c.state.PathHasher.NewHash()
	hasher.Write(b)
	return hasher.Sum(nil)
}
// targetOutputs returns the outputs for a previously executed target.
// If it has not been executed this returns nil.
func (c *Client) targetOutputs(label core.BuildLabel) *pb.Directory {
	c.outputMutex.RLock()
	dir := c.outputs[label]
	c.outputMutex.RUnlock()
	return dir
}
// setOutputs sets the outputs for a previously executed target, converting the
// ActionResult into a Directory proto that can later be used as an input.
// It downloads & re-uploads tree blobs for output directories as needed.
func (c *Client) setOutputs(label core.BuildLabel, ar *pb.ActionResult) error {
	o := &pb.Directory{
		Files:       make([]*pb.FileNode, len(ar.OutputFiles)),
		Directories: make([]*pb.DirectoryNode, len(ar.OutputDirectories)),
		Symlinks:    make([]*pb.SymlinkNode, len(ar.OutputFileSymlinks)+len(ar.OutputDirectorySymlinks)),
	}
	for i, f := range ar.OutputFiles {
		o.Files[i] = &pb.FileNode{
			Name:         f.Path,
			Digest:       f.Digest,
			IsExecutable: f.IsExecutable,
		}
	}
	for i, d := range ar.OutputDirectories {
		// Awkwardly these are encoded as Trees rather than as anything directly useful.
		// We need a DirectoryNode to feed in as an input later on, but the OutputDirectory
		// we get back is quite a different structure at the top level.
		// TODO(peterebden): This is pretty crappy since we need to upload an extra blob here
		//                   that we've just made up. Surely there is a better way we could
		//                   be doing this?
		tree := &pb.Tree{}
		if err := c.readByteStreamToProto(context.Background(), d.TreeDigest, tree); err != nil {
			return wrap(err, "Downloading tree digest for %s [%s]", d.Path, d.TreeDigest.Hash)
		}
		digest, data := c.digestMessageContents(tree.Root)
		if err := c.sendBlobs([]*pb.BatchUpdateBlobsRequest_Request{{Digest: digest, Data: data}}); err != nil {
			return err
		}
		o.Directories[i] = &pb.DirectoryNode{
			Name: d.Path,
			// Reuse the digest computed above rather than re-marshalling the message
			// via digestMessage (which would serialise tree.Root a second time).
			Digest: digest,
		}
	}
	// Iterate the two symlink slices separately; appending one to the other
	// (append(ar.OutputFileSymlinks, ar.OutputDirectorySymlinks...)) could mutate
	// the ActionResult's backing array if it happened to have spare capacity.
	for i, s := range ar.OutputFileSymlinks {
		o.Symlinks[i] = &pb.SymlinkNode{Name: s.Path, Target: s.Target}
	}
	offset := len(ar.OutputFileSymlinks)
	for i, s := range ar.OutputDirectorySymlinks {
		o.Symlinks[offset+i] = &pb.SymlinkNode{Name: s.Path, Target: s.Target}
	}
	c.outputMutex.Lock()
	defer c.outputMutex.Unlock()
	c.outputs[label] = o
	return nil
}
// setFilegroupOutputs sets the outputs for a filegroup from its inputs.
// Note that the directory is recorded even if uploading the inputs failed.
func (c *Client) setFilegroupOutputs(target *core.BuildTarget) error {
	return c.uploadBlobs(func(ch chan<- *blob) error {
		defer close(ch)
		dir, err := c.uploadInputs(ch, target, false, true)
		c.outputMutex.Lock()
		c.outputs[target.Label] = dir
		c.outputMutex.Unlock()
		return err
	})
}
// digestMessage calculates the digest of a proto message as described in the
// Digest message's comments.
func (c *Client) digestMessage(msg proto.Message) *pb.Digest {
	d, _ := c.digestMessageContents(msg)
	return d
}
// digestMessageContents is like digestMessage but also returns the serialised
// contents of the message alongside its digest.
func (c *Client) digestMessageContents(msg proto.Message) (*pb.Digest, []byte) {
	data := mustMarshal(msg)
	return c.digestBlob(data), data
}
// digestBlob digests a byteslice and returns the Digest proto for it
// (hex-encoded hash plus the blob's size in bytes).
func (c *Client) digestBlob(b []byte) *pb.Digest {
	// Note: c.sum returns a slice, so no re-slicing (sum[:]) is needed.
	return &pb.Digest{
		Hash:      hex.EncodeToString(c.sum(b)),
		SizeBytes: int64(len(b)),
	}
}
// wrapActionErr wraps an error with a browser URL for the action related to it.
// The error is returned unchanged when it is nil or no display URL is configured.
func (c *Client) wrapActionErr(err error, actionDigest *pb.Digest) error {
	displayURL := c.state.Config.Remote.DisplayURL
	if err == nil || displayURL == "" {
		return err
	}
	return wrap(err, "Action URL: %s/action/%s/%s/%d/\n", displayURL, c.state.Config.Remote.Instance, actionDigest.Hash, actionDigest.SizeBytes)
}
// actionURL returns a URL to the browser for a remote action, or "" if no
// display URL is configured.
// If prefix is true then the result is wrapped as " (action: <url>)".
func (c *Client) actionURL(digest *pb.Digest, prefix bool) string {
	remote := c.state.Config.Remote
	if remote.DisplayURL == "" {
		return ""
	}
	url := fmt.Sprintf("%s/action/%s/%s/%d/", remote.DisplayURL, remote.Instance, digest.Hash, digest.SizeBytes)
	if prefix {
		return " (action: " + url + ")"
	}
	return url
}
// mustMarshal encodes a message to its binary wire format, dying on failure.
func mustMarshal(msg proto.Message) []byte {
	data, err := proto.Marshal(msg)
	if err == nil {
		return data
	}
	// Not really sure if there is a valid possibility to bring us here (given that
	// the messages in question have no required fields) so assume it won't happen :)
	log.Fatalf("Failed to marshal message: %s", err)
	return nil // unreachable; Fatalf exits
}
// lessThan returns true if semver instance a orders strictly before b,
// comparing major, minor, patch and finally the prerelease string.
func lessThan(a, b *semver.SemVer) bool {
	switch {
	case a.Major != b.Major:
		return a.Major < b.Major
	case a.Minor != b.Minor:
		return a.Minor < b.Minor
	case a.Patch != b.Patch:
		return a.Patch < b.Patch
	default:
		return a.Prerelease < b.Prerelease
	}
}
// printVer pretty-prints a semver message as "major.minor.patch[-prerelease]".
// The default stringing of them is so bad as to be completely unreadable.
func printVer(v *semver.SemVer) string {
	s := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
	if v.Prerelease == "" {
		return s
	}
	return s + "-" + v.Prerelease
}
// toTimestamp converts a time.Time into a protobuf timestamp
func toTimestamp(t time.Time) *timestamp.Timestamp {
return ×tamp.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
}
// toTime converts a protobuf timestamp into a time.Time.
// It's like the ptypes one but we ignore errors (we don't generally care that much).
func toTime(ts *timestamp.Timestamp) time.Time {
	result, _ := ptypes.Timestamp(ts)
	return result
}
// extraPerms returns any additional permission bits we should apply for this file
// (the execute bits when it is marked executable, otherwise nothing).
func extraPerms(file *pb.OutputFile) os.FileMode {
	var mode os.FileMode
	if file.IsExecutable {
		mode = 0111
	}
	return mode
}
// extraFilePerms returns any additional permission bits we should apply for this file
// (the execute bits when it is marked executable, otherwise nothing).
func extraFilePerms(file *pb.FileNode) os.FileMode {
	var mode os.FileMode
	if file.IsExecutable {
		mode = 0111
	}
	return mode
}
// IsNotFound returns true if a given error is a "not found" error (which may be treated
// differently, for example if trying to retrieve artifacts that may not be there).
func IsNotFound(err error) bool {
	code := status.Code(err)
	return code == codes.NotFound
}
// hasChild returns true if a Directory has a child directory by the given name.
func hasChild(dir *pb.Directory, child string) bool {
	for _, node := range dir.Directories {
		if node.Name == child {
			return true
		}
	}
	return false
}
// exhaustChannel reads and discards all messages on the given channel until it's closed.
func exhaustChannel(ch <-chan *blob) {
	for {
		if _, ok := <-ch; !ok {
			return
		}
	}
}
// convertError converts a single google.rpc.Status message into a Go error,
// appending the raw bytes of any detail messages. Returns nil for an OK status.
func convertError(err *rpcstatus.Status) error {
	if err.Code == int32(codes.OK) {
		return nil
	}
	// Build the message up as a string then wrap it once at the end; this produces
	// exactly the same text as repeatedly re-wrapping an error would.
	text := err.Message
	for _, detail := range err.Details {
		text += fmt.Sprintf(" %s", detail.Value)
	}
	return fmt.Errorf("%s", text)
}
// wrap wraps a grpc error in an additional description, but retains its code.
// msg is a printf-style format string applied to args.
func wrap(err error, msg string, args ...interface{}) error {
	s, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status; wrap as a plain error. The formatted message must be
		// passed as an argument rather than as the format string itself, otherwise
		// any '%' it contains would be misinterpreted by Errorf (go vet: printf).
		return fmt.Errorf("%s: %s", fmt.Sprintf(msg, args...), err)
	}
	// Same here: s.Message() may itself contain '%' characters.
	return status.Errorf(s.Code(), "%s: %s", fmt.Sprintf(msg, args...), s.Message())
}
// timeout returns either a build or test timeout from a target.
func timeout(target *core.BuildTarget, test bool) time.Duration {
	if !test {
		return target.BuildTimeout
	}
	return target.TestTimeout
}
// outputs returns the outputs of a target, split arbitrarily and inaccurately
// into files and directories.
// After some discussion we are hoping that servers are permissive about this if
// we get it wrong; we prefer to make an effort though as a minor nicety.
func outputs(target *core.BuildTarget) (files, dirs []string) {
	outs := target.Outputs()
	files = make([]string, 0, len(outs))
	for _, out := range outs {
		out = target.GetTmpOutput(out)
		// Heuristic: a non-binary output with no extension that doesn't end in "file"
		// is assumed to be a directory.
		looksLikeDir := !target.IsBinary && !strings.HasSuffix(out, "file") && !strings.ContainsRune(path.Base(out), '.')
		if looksLikeDir {
			dirs = append(dirs, out)
		} else {
			files = append(files, out)
		}
	}
	return files, dirs
}
// A dirBuilder is for helping build up a tree of Directory protos.
//
// This is pretty awkward; we need to recursively build a whole set of directories
// which does not match up to how we represent it (which is a series of files, with
// no corresponding directories, that are not usefully ordered for this purpose).
// We also need to handle the case of existing targets where we already know the
// directory structure but may not have the files physically on disk.
type dirBuilder struct {
	c    *Client               // client used to digest (and upload) the directory messages
	root *pb.Directory         // the root of the tree being built
	dirs map[string]*pb.Directory // all known directories, keyed by their (slash-separated) path
}
// newDirBuilder returns a dirBuilder with an empty root directory registered
// under both of the names callers might use for it.
func newDirBuilder(c *Client) *dirBuilder {
	root := &pb.Directory{}
	b := &dirBuilder{
		c:    c,
		root: root,
		dirs: map[string]*pb.Directory{},
	}
	b.dirs["."] = root // Ensure the root is in there
	b.dirs[""] = root  // Some things might try to name it this way
	return b
}
// Dir ensures the given directory exists, and constructs any necessary parents.
// It returns the Directory proto for it (creating an empty one if it was unknown).
func (b *dirBuilder) Dir(name string) *pb.Directory {
	return b.dir(name, "")
}
// dir returns the Directory proto for the given path, creating it and all its
// parents if necessary. If child is nonempty, a DirectoryNode for it is added to
// the returned directory (if not already present).
func (b *dirBuilder) dir(dir, child string) *pb.Directory {
	if dir == "." || dir == "/" {
		return b.root
	}
	dir = strings.TrimSuffix(dir, "/")
	d, present := b.dirs[dir]
	if !present {
		d = &pb.Directory{}
		b.dirs[dir] = d
		// Recurse to ensure all parents exist, registering this dir as a child of
		// its parent. Note that path.Split leaves a trailing slash on the parent,
		// which the TrimSuffix above removes on the recursive call; `dir` is
		// deliberately shadowed here with the parent path.
		dir, base := path.Split(dir)
		b.dir(dir, base)
	}
	// TODO(peterebden): The linear scan in hasChild is a bit suboptimal, we should
	// really use the dirs map to determine this.
	if child != "" && !hasChild(d, child) {
		d.Directories = append(d.Directories, &pb.DirectoryNode{Name: child})
	}
	return d
}
// Root returns the root directory, calculates the digests of all others and uploads them
// if the given channel is not nil.
func (b *dirBuilder) Root(ch chan<- *blob) *pb.Directory {
	// The dfs fills in the Digest field of every DirectoryNode in the tree.
	b.dfs(".", ch)
	return b.root
}
// dfs walks the directory tree depth-first from name, filling in the digest of
// each child node, optionally sending each directory's serialised form to ch for
// upload, and returning the digest of the named directory itself.
func (b *dirBuilder) dfs(name string, ch chan<- *blob) *pb.Digest {
	d := b.dirs[name]
	for _, child := range d.Directories {
		// A non-nil digest means we're reusing outputs from an earlier call; keep it.
		if child.Digest != nil {
			continue
		}
		child.Digest = b.dfs(path.Join(name, child.Name), ch)
	}
	digest, data := b.c.digestMessageContents(d)
	if ch != nil {
		ch <- &blob{Digest: digest, Data: data}
	}
	return digest
}
// convertPlatform converts the "name=value" platform entries from the config
// into a Platform proto, warning about (and skipping) any malformed entries.
func convertPlatform(config *core.Configuration) *pb.Platform {
	platform := &pb.Platform{}
	for _, prop := range config.Remote.Platform {
		parts := strings.SplitN(prop, "=", 2)
		if len(parts) != 2 {
			log.Warning("Invalid config setting in remote.platform %s; will ignore", prop)
			continue
		}
		platform.Properties = append(platform.Properties, &pb.Platform_Property{
			Name:  parts[0],
			Value: parts[1],
		})
	}
	return platform
}
// removeOutputs removes all output files/directories for a target from its out dir.
func removeOutputs(target *core.BuildTarget) error {
	outDir := target.OutDir()
	for _, output := range target.Outputs() {
		fullPath := path.Join(outDir, output)
		if err := os.RemoveAll(fullPath); err != nil {
			return fmt.Errorf("Failed to remove output for %s: %s", target, err)
		}
	}
	return nil
}
// totalSize returns the total size of a set of downloads from an ActionResult,
// summing the sizes of all output files and of every directory in each tree.
func totalSize(dirs []*pb.Tree, files []*pb.OutputFile) int {
	var total int64
	for _, f := range files {
		total += f.Digest.SizeBytes
	}
	for _, tree := range dirs {
		total += dirSize(tree.Root)
		for _, child := range tree.Children {
			total += dirSize(child)
		}
	}
	return int(total)
}
// dirSize returns the immediate size of a directory (but not recursively):
// the sizes of its files plus the digest sizes recorded on its child nodes.
func dirSize(dir *pb.Directory) int64 {
	var size int64
	for _, f := range dir.Files {
		size += f.Digest.SizeBytes
	}
	for _, child := range dir.Directories {
		size += child.Digest.SizeBytes
	}
	return size
}