artifacts.go
// Copyright 2018-2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.

package runner

// This file contains the implementation of artifacts that exist as a directory containing
// files on a file system, or as archives on a cloud storage style platform.
//
// Artifacts can be watched for changes and transferred between a file system and
// storage platforms when their contents change.
//
import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/leaf-ai/go-service/pkg/archive"
	"github.com/leaf-ai/studio-go-runner/internal/request"

	hasher "github.com/karlmutch/hashstructure"

	"github.com/go-stack/stack"
	"github.com/jjeffery/kv" // MIT License
)
// ArtifactCache is used to encapsulate and store hashes, typically file hashes, and
// prevent duplicated uploads from occurring needlessly
//
type ArtifactCache struct {
	upHashes map[string]uint64

	sync.Mutex

	// ErrorC can be used by the application layer to receive diagnostic and other
	// information about errors occurring inside the caching tracker and to surface
	// these errors to the logging system
	ErrorC chan kv.Error
}
// NewArtifactCache initializes a hash tracker for artifact related files and
// passes it back to the caller. The tracking structure can be used to track
// files that have already been downloaded / uploaded and also includes a channel
// that can be used to receive error notifications
//
func NewArtifactCache() (cache *ArtifactCache) {
	return &ArtifactCache{
		upHashes: map[string]uint64{},
		ErrorC:   make(chan kv.Error),
	}
}
// Close will clean up the cache of hashes and close the error reporting channel
// associated with the cache tracker
//
func (cache *ArtifactCache) Close() {
	if cache.ErrorC != nil {
		defer func() {
			// Closing an already closed channel would cause a panic, which is
			// acceptable while tearing down the cache
			recover()
		}()
		close(cache.ErrorC)
	}
}
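
// Illustrative usage sketch (not part of the original file): the cache is
// typically created once and its error channel drained by the caller so that
// internal reporting never blocks. The logger here is an assumption standing
// in for whatever the application layer provides:
//
//	cache := NewArtifactCache()
//	go func() {
//		for err := range cache.ErrorC {
//			log.Println("artifact cache:", err.Error())
//		}
//	}()
//	defer cache.Close()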
// readAllHash performs a breadth-first walk of the directory tree rooted at dir,
// collecting the file metadata of every entry encountered, and then reduces the
// collection to a single hash that can be used to detect later changes
//
func readAllHash(dir string) (hash uint64, err kv.Error) {
	files := []os.FileInfo{}
	dirs := []string{dir}
	for {
		newDirs := []string{}
		for _, aDir := range dirs {
			items, errGo := ioutil.ReadDir(aDir)
			if errGo != nil {
				return 0, kv.Wrap(errGo).With("hashDir", aDir, "stack", stack.Trace().TrimRuntime())
			}
			for _, info := range items {
				if info.IsDir() {
					newDirs = append(newDirs, filepath.Join(aDir, info.Name()))
				}
				files = append(files, info)
			}
		}
		dirs = newDirs
		if len(dirs) == 0 {
			break
		}
	}

	hash, errGo := hasher.Hash(files, nil)
	if errGo != nil {
		return 0, kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	return hash, nil
}
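
// Illustrative sketch (not part of the original file): because readAllHash
// reduces the metadata of an entire directory tree to one value, change
// detection becomes a simple comparison of two uint64 results:
//
//	before, _ := readAllHash("/tmp/artifact")
//	// ... the experiment writes or modifies files under /tmp/artifact ...
//	after, _ := readAllHash("/tmp/artifact")
//	if before != after {
//		// the artifact directory changed and may need uploading
//	}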
// Hash is used to obtain the hash of an artifact from the backing store
// implementation being used by the storage layer
//
func (cache *ArtifactCache) Hash(ctx context.Context, art *request.Artifact, projectId string, group string, env map[string]string, dir string) (hash string, err kv.Error) {
	kv := kv.With("group", group).With("artifact", art.Qualified).With("project", projectId)

	storage, err := NewObjStore(
		ctx,
		&StoreOpts{
			Art:       art,
			ProjectID: projectId,
			Group:     group,
			Env:       env,
			Validate:  true,
		},
		cache.ErrorC)
	if err != nil {
		return "", kv.Wrap(err).With("stack", stack.Trace().TrimRuntime())
	}
	defer storage.Close()

	return storage.Hash(ctx, art.Key)
}
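
// Illustrative sketch (not part of the original file): a caller could compare
// the remote hash against a locally recorded value to decide whether a
// download can be skipped; lastSeenHash is a hypothetical variable, and art,
// projectId, and env are assumed to come from the surrounding request:
//
//	remote, err := cache.Hash(ctx, art, projectId, "output", env, dir)
//	if err == nil && remote == lastSeenHash {
//		// remote artifact unchanged, no fetch required
//	}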
// Fetch can be used to retrieve an artifact from a storage layer implementation, while
// passing through the lens of a caching filter that prevents unneeded downloads.
//
func (cache *ArtifactCache) Fetch(ctx context.Context, art *request.Artifact, projectId string, group string, maxBytes int64, env map[string]string, dir string) (size int64, warns []kv.Error, err kv.Error) {
	kv := kv.With("group", group).With("artifact", art.Qualified).With("project", projectId)

	// Process the qualified URI and use just the path for now
	dest := filepath.Join(dir, group)
	if errGo := os.MkdirAll(dest, 0700); errGo != nil {
		return 0, warns, kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("dest", dest)
	}

	storage, err := NewObjStore(
		ctx,
		&StoreOpts{
			Art:       art,
			ProjectID: projectId,
			Group:     group,
			Env:       env,
			Validate:  true,
		},
		cache.ErrorC)
	if err != nil {
		return 0, warns, err
	}

	if art.Unpack && !archive.IsTar(art.Key) {
		// Close the storage handle before this early return so it is not leaked
		storage.Close()
		return 0, warns, kv.NewError("the unpack flag was set for an unsupported file format (tar gzip/bzip2 only supported)").With("stack", stack.Trace().TrimRuntime())
	}

	switch group {
	case "_metadata":
		// The following is disabled until we look into how to efficiently do downloads of
		// experiment related retries rather than downloading an entire host's worth of activity
		// size, warns, err = storage.Gather(ctx, "metadata/", dest)
	default:
		size, warns, err = storage.Fetch(ctx, art.Key, art.Unpack, dest, maxBytes)
	}
	storage.Close()
	if err != nil {
		return 0, warns, err
	}

	// Immutable artifacts only need to be downloaded, nothing else
	if !art.Mutable && !strings.HasPrefix(art.Qualified, "file://") {
		return size, warns, nil
	}

	if cache == nil {
		return size, warns, nil
	}

	if err = cache.updateHash(dest); err != nil {
		return 0, warns, err
	}

	return size, warns, nil
}
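
// Illustrative usage sketch (not part of the original file): fetching a
// hypothetical "workspace" artifact into an experiment directory with a 10 GiB
// size cap; the group name and paths shown are assumptions:
//
//	size, warns, err := cache.Fetch(ctx, art, projectId, "workspace", 10*1024*1024*1024, env, "/tmp/experiment")
//	for _, warn := range warns {
//		log.Println("fetch warning:", warn.Error())
//	}
//	if err != nil {
//		return err
//	}
//	_ = size // bytes written beneath /tmp/experiment/workspace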
// updateHash records the current hash of the directory tree so that later calls
// to checkHash can detect whether the artifact's contents have changed
//
func (cache *ArtifactCache) updateHash(dir string) (err kv.Error) {
	hash, err := readAllHash(dir)
	if err != nil {
		return err
	}

	// Having obtained the artifact, if it is mutable we record the hash of the
	// upload area covering all files and directories the artifact included
	cache.Lock()
	cache.upHashes[dir] = hash
	cache.Unlock()

	return nil
}

// checkHash compares the recorded hash for a directory against its current
// contents, returning true when nothing has changed since the last update
//
func (cache *ArtifactCache) checkHash(dir string) (isValid bool, err kv.Error) {
	cache.Lock()
	defer cache.Unlock()

	oldHash, isPresent := cache.upHashes[dir]
	if !isPresent {
		return false, nil
	}

	hash, err := readAllHash(dir)
	if err != nil {
		return false, err
	}
	return oldHash == hash, nil
}
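
// Illustrative sketch (not part of the original file): updateHash and checkHash
// pair up to implement the upload filter, with checkHash returning true only
// while the directory remains untouched:
//
//	_ = cache.updateHash("/tmp/experiment/output")
//	unchanged, _ := cache.checkHash("/tmp/experiment/output") // true
//	// ... the experiment writes a new checkpoint into the directory ...
//	unchanged, _ = cache.checkHash("/tmp/experiment/output") // now false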
// Local returns the local disk based file name for an artifact's expanded archive files
//
func (cache *ArtifactCache) Local(group string, dir string, file string) (fn string, err kv.Error) {
	fn = filepath.Join(dir, group, file)
	if _, errOs := os.Stat(fn); errOs != nil {
		return "", kv.Wrap(errOs).With("stack", stack.Trace().TrimRuntime())
	}
	return fn, nil
}
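
// Illustrative sketch (not part of the original file): resolving a file inside
// a previously fetched group; the group and file names are hypothetical:
//
//	fn, err := cache.Local("output", "/tmp/experiment", "model.bin")
//	if err == nil {
//		// fn is /tmp/experiment/output/model.bin and exists on disk
//	}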
// Restore the artifacts that have been marked mutable and that have changed
//
func (cache *ArtifactCache) Restore(ctx context.Context, art *request.Artifact, projectId string, group string, env map[string]string, dir string) (uploaded bool, warns []kv.Error, err kv.Error) {

	// Immutable artifacts only need to be downloaded, nothing else
	if !art.Mutable {
		return false, warns, nil
	}

	kvDetails := []interface{}{"artifact", art.Qualified, "project", projectId, "group", group, "dir", dir}

	source := filepath.Join(dir, group)

	isValid, err := cache.checkHash(source)
	if err != nil {
		kvDetails = append(kvDetails, "group", group, "stack", stack.Trace().TrimRuntime())
		return false, warns, kv.Wrap(err).With(kvDetails...)
	}
	if isValid {
		warns = append(warns, kv.NewError("hash unchanged").With(kvDetails...))
		return false, warns, nil
	}

	storage, err := NewObjStore(
		ctx,
		&StoreOpts{
			Art:       art,
			ProjectID: projectId,
			Env:       env,
			Validate:  true,
		},
		cache.ErrorC)
	if err != nil {
		return false, warns, err
	}
	defer storage.Close()

	// Check to see if the cache has a hash for the directory that has changed and
	// needs uploading
	//
	hash, errHash := readAllHash(dir)

	switch group {
	case "_metadata":
		// If no metadata exists, which could be legitimate, don't try to save it,
		// otherwise things will go wrong when walking the directories
		if _, errGo := os.Stat(source); !os.IsNotExist(errGo) {
			if warns, err = storage.Hoard(ctx, source, "metadata"); err != nil {
				return false, warns, err.With("group", group)
			}
		}
	default:
		if warns, err = storage.Deposit(ctx, source, art.Key); err != nil {
			return false, warns, err.With("group", group)
		}
	}

	if errHash == nil {
		// Having uploaded the mutable artifact, refresh the recorded hash covering
		// all files and directories the artifact included
		cache.Lock()
		cache.upHashes[dir] = hash
		cache.Unlock()
	}

	return true, warns, nil
}
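
// Illustrative usage sketch (not part of the original file): a runner might
// call Restore periodically while an experiment executes so that mutable
// artifacts such as checkpoints are uploaded only when their contents change;
// the ticker interval and group name are assumptions:
//
//	for range time.Tick(30 * time.Second) {
//		uploaded, warns, err := cache.Restore(ctx, art, projectId, "output", env, dir)
//		for _, warn := range warns {
//			log.Println("restore:", warn.Error())
//		}
//		if err != nil {
//			log.Println("restore failed:", err.Error())
//		}
//		_ = uploaded
//	}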