forked from uber/kraken
/
blobs.go
139 lines (122 loc) · 3.65 KB
/
blobs.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerregistry
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/uber/kraken/lib/dockerregistry/transfer"
"github.com/uber/kraken/lib/store"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// BlobStore defines cache file accessors.
type BlobStore interface {
	// GetCacheFileStat returns file info for the named file in the cache.
	GetCacheFileStat(name string) (os.FileInfo, error)
	// GetCacheFileReader opens the named cache file for reading.
	GetCacheFileReader(name string) (store.FileReader, error)
}
// blobs serves blob content and metadata to the registry storage driver,
// downloading blobs on demand through the transferer.
type blobs struct {
	bs         BlobStore                // local cache file access
	transferer transfer.ImageTransferer // fetches blobs not present locally
}
// newBlobs constructs a blobs helper backed by the given cache store and
// image transferer.
func newBlobs(bs BlobStore, transferer transfer.ImageTransferer) *blobs {
	return &blobs{
		bs:         bs,
		transferer: transferer,
	}
}
// getDigest returns the digest of the blob at the given path, rendered as
// a byte slice of its string form (e.g. "sha256:...").
func (b *blobs) getDigest(path string) ([]byte, error) {
	d, err := GetLayerDigest(path)
	if err != nil {
		return nil, err
	}
	return []byte(d.String()), nil
}
// stat returns file info for the blob at path. The repo name is taken from
// the request context; a missing blob maps to storagedriver.PathNotFoundError
// so the registry reports a proper 404.
func (b *blobs) stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	repo, err := parseRepo(ctx)
	if err != nil {
		return nil, fmt.Errorf("parse repo %s: %s", path, err)
	}
	digest, err := GetBlobDigest(path)
	if err != nil {
		return nil, err
	}
	info, err := b.transferer.Stat(repo, digest)
	if err != nil {
		if err == transfer.ErrBlobNotFound {
			return nil, storagedriver.PathNotFoundError{
				DriverName: "kraken",
				Path:       digest.Hex(),
			}
		}
		return nil, fmt.Errorf("transferer stat: %s", err)
	}
	// Hacking the path, since kraken storage driver is also the consumer of this info.
	// Instead of the relative path from root that docker registry expected, just use content hash.
	fields := storagedriver.FileInfoFields{
		Path:    digest.Hex(),
		Size:    info.Size,
		ModTime: time.Now(),
		IsDir:   false,
	}
	return storagedriver.FileInfoInternal{FileInfoFields: fields}, nil
}
// reader returns a reader for the blob at path, positioned at offset.
// It delegates to getCacheReaderHelper, which downloads the blob on demand.
func (b *blobs) reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	return b.getCacheReaderHelper(ctx, path, offset)
}
// getContent reads the entire blob at path into memory. Intended for small
// payloads only; large blobs should be streamed via reader.
func (b *blobs) getContent(ctx context.Context, path string) ([]byte, error) {
	rc, err := b.getCacheReaderHelper(ctx, path, 0)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
// getCacheReaderHelper downloads the blob identified by path for the repo
// stored in ctx and returns a reader positioned at offset. A missing blob
// maps to storagedriver.PathNotFoundError so the registry reports 404.
func (b *blobs) getCacheReaderHelper(
	ctx context.Context, path string, offset int64) (io.ReadCloser, error) {

	repo, err := parseRepo(ctx)
	if err != nil {
		return nil, fmt.Errorf("parse repo %s: %s", path, err)
	}
	digest, err := GetBlobDigest(path)
	if err != nil {
		// Fixed message: this parses a blob digest, not a layer digest.
		return nil, fmt.Errorf("get blob digest %s: %s", path, err)
	}
	r, err := b.transferer.Download(repo, digest)
	if err != nil {
		if err == transfer.ErrBlobNotFound {
			return nil, storagedriver.PathNotFoundError{
				DriverName: "kraken",
				Path:       digest.Hex(),
			}
		}
		return nil, fmt.Errorf("transferer download: %s", err)
	}
	// io.SeekStart replaces the magic constant 0; close r on failure so the
	// underlying file handle is not leaked when the seek is rejected.
	if _, err := r.Seek(offset, io.SeekStart); err != nil {
		r.Close()
		return nil, fmt.Errorf("seek: %s", err)
	}
	return r, nil
}
func parseRepo(ctx context.Context) (string, error) {
repo, ok := ctx.Value("vars.name").(string)
if !ok {
return "", errors.New("could not parse vars.name from context")
}
if repo == "" {
return "", errors.New("vars.name is empty")
}
return repo, nil
}