tufclient.go (from a fork of notaryproject/notary)

package client

import (
	"encoding/json"
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/docker/notary"
	store "github.com/docker/notary/storage"
	"github.com/docker/notary/trustpinning"
	"github.com/docker/notary/tuf"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// TUFClient is a usability wrapper around a raw TUF repo
type TUFClient struct {
remote store.RemoteStore
cache store.MetadataStore
oldBuilder tuf.RepoBuilder
newBuilder tuf.RepoBuilder
}

// NewTUFClient initializes a TUFClient with the given repo builders, remote
// source of content, and cache
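//
// A minimal usage sketch (hypothetical setup; in notary the builders typically
// come from tuf.NewRepoBuilder, and remote/cache are storage implementations):
//
//	client := NewTUFClient(oldBuilder, newBuilder, remote, cache)
//	repo, invalid, err := client.Update()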
func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient {
return &TUFClient{
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: remote,
cache: cache,
}
}
// Update performs an update to the TUF repo as defined by the TUF spec
func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
// a. If out of date, get updated snapshot
// i. If snapshot error, download new root and return to 1.
// 3. Check if root correct against snapshot
// a. If incorrect, download new root and return to 1.
// 4. Iteratively download and search targets and delegations to find target meta
logrus.Debug("updating TUF client")
err := c.update()
if err != nil {
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
logrus.Debug("Resetting the TUF builder...")
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
if err := c.updateRoot(); err != nil {
logrus.Debug("Client Update (Root): ", err)
return nil, nil, err
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
if err := c.update(); err != nil {
return nil, nil, err
}
}
return c.newBuilder.Finish()
}
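
// update runs a single update pass: timestamp, then snapshot, then all targets
// and delegations. Any failure is returned so the caller can refresh the root
// and retry.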
func (c *TUFClient) update() error {
if err := c.downloadTimestamp(); err != nil {
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
return err
}
if err := c.downloadSnapshot(); err != nil {
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
return err
}
// will always need top level targets at a minimum
if err := c.downloadTargets(); err != nil {
logrus.Debugf("Client Update (Targets): %s", err.Error())
return err
}
return nil
}
// updateRoot checks if there is a newer version of the root available, and if so
// downloads all intermediate root files to allow proper key rotation.
func (c *TUFClient) updateRoot() error {
// Get current root version
currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole)
currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName)
// Get new root version
raw, err := c.downloadRoot()
switch err.(type) {
case *trustpinning.ErrRootRotationFail:
// Rotation errors are okay since we haven't yet downloaded
// all intermediate root files
break
case nil:
// No error updating root - we were at most 1 version behind
return nil
default:
// Return any non-rotation error.
return err
}
// Load current version into newBuilder
currentRaw, err := c.cache.GetSized(data.CanonicalRootRole.String(), -1)
if err != nil {
logrus.Debugf("error loading %d.%s: %s", currentVersion, data.CanonicalRootRole, err)
return err
}
if err := c.newBuilder.LoadRootForUpdate(currentRaw, currentVersion, false); err != nil {
logrus.Debugf("%d.%s is invalid: %s", currentVersion, data.CanonicalRootRole, err)
return err
}
// Extract newest version number
signedRoot := &data.Signed{}
if err := json.Unmarshal(raw, signedRoot); err != nil {
return err
}
newestRoot, err := data.RootFromSigned(signedRoot)
if err != nil {
return err
}
newestVersion := newestRoot.Signed.SignedCommon.Version
// Update from current + 1 (current already loaded) to newest - 1 (newest loaded below)
if err := c.updateRootVersions(currentVersion+1, newestVersion-1); err != nil {
return err
}
// Already downloaded newest, verify it against newest - 1
if err := c.newBuilder.LoadRootForUpdate(raw, newestVersion, true); err != nil {
logrus.Debugf("downloaded %d.%s is invalid: %s", newestVersion, data.CanonicalRootRole, err)
return err
}
logrus.Debugf("successfully verified downloaded %d.%s", newestVersion, data.CanonicalRootRole)
// Write newest to cache
if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil {
logrus.Debugf("unable to write %s to cache: %d.%s", newestVersion, data.CanonicalRootRole, err)
}
logrus.Debugf("finished updating root files")
return nil
}

// updateRootVersions updates the root from its current version to a target
// version, rotating keys as they are found
func (c *TUFClient) updateRootVersions(fromVersion, toVersion int) error {
for v := fromVersion; v <= toVersion; v++ {
logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v)
versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole)
raw, err := c.remote.GetSized(versionedRole, -1)
if err != nil {
logrus.Debugf("error downloading %s: %s", versionedRole, err)
return err
}
if err := c.newBuilder.LoadRootForUpdate(raw, v, false); err != nil {
logrus.Debugf("downloaded %s is invalid: %s", versionedRole, err)
return err
}
logrus.Debugf("successfully verified downloaded %s", versionedRole)
}
return nil
}
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *TUFClient) downloadTimestamp() error {
logrus.Debug("Loading timestamp...")
role := data.CanonicalTimestampRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// always get the remote timestamp, since it supersedes the local one
cachedTS, cachedErr := c.cache.GetSized(role.String(), notary.MaxTimestampSize)
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
	// Continue only if there was no remote error, or if the failure was just a
	// network problem. A validation error must propagate so we can download a
	// new root or fail the update.
switch remoteErr.(type) {
case nil:
return nil
case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
break
default:
return remoteErr
}
// since it was a network error: get the cached timestamp, if it exists
if cachedErr != nil {
logrus.Debug("no cached or remote timestamp available")
return remoteErr
}
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
err := c.newBuilder.Load(role, cachedTS, 1, false)
if err == nil {
logrus.Debug("successfully verified cached timestamp")
}
return err
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *TUFClient) downloadSnapshot() error {
logrus.Debug("Loading snapshot...")
role := data.CanonicalSnapshotRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *TUFClient) downloadTargets() error {
toDownload := []data.DelegationRole{{
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
Paths: []string{""},
}}
for len(toDownload) > 0 {
role := toDownload[0]
toDownload = toDownload[1:]
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
continue
}
children, err := c.getTargetsFile(role, consistentInfo)
switch err.(type) {
case signed.ErrExpired, signed.ErrRoleThreshold:
if role.Name == data.CanonicalTargetsRole {
return err
}
logrus.Warnf("Error getting %s: %s", role.Name, err)
break
case nil:
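			// prepend the children so the traversal remains pre-order
			// (parents are processed before their delegations)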
toDownload = append(children, toDownload...)
default:
return err
}
}
return nil
}
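
// getTargetsFile loads a single targets (or delegated targets) role from cache
// or remote, and returns the valid delegation roles it declares so the caller
// can continue the pre-order traversal.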
func (c *TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
logrus.Debugf("Loading %s...", role.Name)
tgs := &data.SignedTargets{}
raw, err := c.tryLoadCacheThenRemote(ci)
if err != nil {
return nil, err
}
	// the unmarshal error can be safely ignored: if tryLoadCacheThenRemote did
	// not fail, these raw bytes were already parsed and loaded into the builder
json.Unmarshal(raw, tgs)
return tgs.GetValidDelegations(role), nil
}
// downloadRoot is responsible for downloading the root.json
func (c *TUFClient) downloadRoot() ([]byte, error) {
role := data.CanonicalRootRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("Loading root with no expected checksum")
// get the cached root, if it exists, just for version checking
cachedRoot, _ := c.cache.GetSized(role.String(), -1)
// prefer to download a new root
return c.tryLoadRemote(consistentInfo, cachedRoot)
}
return c.tryLoadCacheThenRemote(consistentInfo)
}
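
// tryLoadCacheThenRemote tries to load and verify role metadata from the local
// cache, falling back to a remote download if the cached copy is missing or
// fails verification.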
func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length())
if err != nil {
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
return c.tryLoadRemote(consistentInfo, nil)
}
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
return cachedTS, nil
}
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
return c.tryLoadRemote(consistentInfo, cachedTS)
}
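
// tryLoadRemote downloads role metadata from the remote store and verifies it,
// using the version loaded into the old builder (if any) as the minimum
// acceptable version. Verified metadata is written to the cache; on download
// failure the old bytes are returned so the caller may fall back to them.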
func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
consistentName := consistentInfo.ConsistentName()
raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
if err != nil {
logrus.Debugf("error downloading %s: %s", consistentName, err)
return old, err
}
// try to load the old data into the old builder - only use it to validate
// versions if it loads successfully. If it errors, then the loaded version
// will be 1
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
return raw, err
}
logrus.Debugf("successfully verified downloaded %s", consistentName)
if err := c.cache.Set(consistentInfo.RoleName.String(), raw); err != nil {
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
}
return raw, nil
}