Merge pull request #64 from stevvooe/separate-signature-storage
Refactor backend storage layout to meet new requirements (addresses #25, #46)
stevvooe committed Jan 15, 2015
2 parents effa09b + 83d6262 commit e5f0622
Showing 14 changed files with 1,142 additions and 252 deletions.
34 changes: 15 additions & 19 deletions digest/digest.go
@@ -36,6 +36,11 @@ func NewDigest(alg string, h hash.Hash) Digest {
 	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
 }
 
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
 // DigestRegexp matches valid digest types.
 var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+`)

@@ -57,33 +62,24 @@ func ParseDigest(s string) (Digest, error) {
 
 // FromReader returns the most valid digest for the underlying content.
 func FromReader(rd io.Reader) (Digest, error) {
+	h := sha256.New()
 
-	// TODO(stevvooe): This is pretty inefficient to always be calculating a
-	// sha256 hash to provide fallback, but it provides some nice semantics in
-	// that we never worry about getting the right digest for a given reader.
-	// For the most part, we can detect tar vs non-tar with only a few bytes,
-	// so a scheme that saves those bytes would probably be better here.
+	if _, err := io.Copy(h, rd); err != nil {
+		return "", err
+	}
 
-	h := sha256.New()
-	tr := io.TeeReader(rd, h)
+	return NewDigest("sha256", h), nil
+}
 
-	ts, err := tarsum.NewTarSum(tr, true, tarsum.Version1)
+// FromTarArchive produces a tarsum digest from reader rd.
+func FromTarArchive(rd io.Reader) (Digest, error) {
+	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
 	if err != nil {
 		return "", err
 	}
 
-	// Try to copy from the tarsum, if we fail, copy the remaining bytes into
-	// hash directly.
 	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
-		if err.Error() != "archive/tar: invalid tar header" {
-			return "", err
-		}
-
-		if _, err := io.Copy(h, rd); err != nil {
-			return "", err
-		}
-
-		return NewDigest("sha256", h), nil
+		return "", err
 	}
 
 	d, err := ParseDigest(ts.Sum(nil))
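With the fallback gone, callers pick the digest routine that matches their content. A minimal sketch of the new calling convention, assuming the digest package is imported from this repository (the sample inputs are made up for illustration):

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// Non-tar content is now always hashed with plain sha256; there is no
	// tar-detection fallback to worry about.
	d, err := digest.FromReader(strings.NewReader("some manifest bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // prints sha256:<hex>

	// Tar archives must be digested explicitly. Feeding a non-tar stream to
	// FromTarArchive surfaces the tar error instead of silently falling
	// back to sha256.
	if _, err := digest.FromTarArchive(strings.NewReader("not a tar")); err != nil {
		fmt.Println("expected error:", err)
	}
}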
2 changes: 1 addition & 1 deletion digest/verifiers_test.go
@@ -30,7 +30,7 @@ func TestDigestVerifier(t *testing.T) {
 		t.Fatalf("error creating tarfile: %v", err)
 	}
 
-	digest, err = FromReader(tf)
+	digest, err = FromTarArchive(tf)
 	if err != nil {
 		t.Fatalf("error digesting tarsum: %v", err)
 	}
159 changes: 159 additions & 0 deletions storage/blobstore.go
@@ -0,0 +1,159 @@
package storage

import (
	"fmt"

	"github.com/Sirupsen/logrus"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/storagedriver"
)

// TODO(stevvooe): Currently, the blobStore implementation is used by the
// manifest store. The layer store should be refactored to better leverage the
// blobStore, reducing duplicated code.

// blobStore implements a generalized blob store over a driver, supporting the
// read side and link management. This object is intentionally a leaky
// abstraction, providing utility methods that support creating and traversing
// backend links.
type blobStore struct {
	driver storagedriver.StorageDriver
	pm     *pathMapper
}

// exists reports whether or not the blob at the given digest exists. If the
// driver returns an error other than storagedriver.PathNotFoundError, it is
// returned.
func (bs *blobStore) exists(dgst digest.Digest) (bool, error) {
	path, err := bs.path(dgst)

	if err != nil {
		return false, err
	}

	ok, err := exists(bs.driver, path)
	if err != nil {
		return false, err
	}

	return ok, nil
}

// get retrieves the blob by digest, returning it as a byte slice. This should
// only be used for small objects.
func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) {
	bp, err := bs.path(dgst)
	if err != nil {
		return nil, err
	}

	return bs.driver.GetContent(bp)
}

// link links the path to the provided digest by writing the digest into the
// target file.
func (bs *blobStore) link(path string, dgst digest.Digest) error {
	if exists, err := bs.exists(dgst); err != nil {
		return err
	} else if !exists {
		return fmt.Errorf("cannot link non-existent blob")
	}

	// The contents of the "link" file are the exact string contents of the
	// digest, which is specified in that package.
	return bs.driver.PutContent(path, []byte(dgst))
}

// linked reads the link at path and returns the content.
func (bs *blobStore) linked(path string) ([]byte, error) {
	linked, err := bs.readlink(path)
	if err != nil {
		return nil, err
	}

	return bs.get(linked)
}

// readlink returns the linked digest at path.
func (bs *blobStore) readlink(path string) (digest.Digest, error) {
	content, err := bs.driver.GetContent(path)
	if err != nil {
		return "", err
	}

	linked, err := digest.ParseDigest(string(content))
	if err != nil {
		return "", err
	}

	if exists, err := bs.exists(linked); err != nil {
		return "", err
	} else if !exists {
		return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked)
	}

	return linked, nil
}

// resolve reads the digest link at path and returns the blob data path for
// the linked digest.
func (bs *blobStore) resolve(path string) (string, error) {
	dgst, err := bs.readlink(path)
	if err != nil {
		return "", err
	}

	return bs.path(dgst)
}

// put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests.
func (bs *blobStore) put(p []byte) (digest.Digest, error) {
	dgst, err := digest.FromBytes(p)
	if err != nil {
		logrus.Errorf("error digesting content: %v, %s", err, string(p))
		return "", err
	}

	bp, err := bs.path(dgst)
	if err != nil {
		return "", err
	}

	// If the content already exists, just return the digest.
	if exists, err := bs.exists(dgst); err != nil {
		return "", err
	} else if exists {
		return dgst, nil
	}

	return dgst, bs.driver.PutContent(bp, p)
}

// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
	bp, err := bs.pm.path(blobDataPathSpec{
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	return bp, nil
}

// exists provides a utility method to test whether or not a path exists
// within the given driver.
func exists(driver storagedriver.StorageDriver, path string) (bool, error) {
	if _, err := driver.Stat(path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return false, nil
		default:
			return false, err
		}
	}

	return true, nil
}
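Since everything in blobstore.go is unexported, the round trip below is a package-internal sketch: it assumes the inmemory driver and the pathMapper construction used in the tests later in this diff, and the link path is made up for illustration.

bs := &blobStore{
	driver: inmemory.New(),
	pm: &pathMapper{
		root:    "/storage/testing",
		version: storagePathVersion,
	},
}

// put is content-addressed: storing the same bytes twice returns the same
// digest and skips the second write.
dgst, err := bs.put([]byte(`{"name": "foo/bar"}`))
if err != nil {
	// handle error
}

// link writes the digest string into an arbitrary path; linked follows that
// path back to the blob contents.
linkPath := "/storage/testing/example/link" // illustrative path
if err := bs.link(linkPath, dgst); err != nil {
	// handle error
}

content, err := bs.linked(linkPath) // returns the bytes stored by put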
13 changes: 9 additions & 4 deletions storage/delegatelayerhandler.go
@@ -54,12 +54,17 @@ func (lh *delegateLayerHandler) Resolve(layer Layer) (http.Handler, error) {
 // urlFor returns a download URL for the given layer, or the empty string if
 // unsupported.
 func (lh *delegateLayerHandler) urlFor(layer Layer) (string, error) {
-	blobPath, err := resolveBlobPath(lh.storageDriver, lh.pathMapper, layer.Name(), layer.Digest())
-	if err != nil {
-		return "", err
+	// Crack open the layer to get at the layerStore
+	layerRd, ok := layer.(*layerReader)
+	if !ok {
+		// TODO(stevvooe): We probably want to find a better way to get at the
+		// underlying filesystem path for a given layer. Perhaps, the layer
+		// handler should have its own layer store but right now, it is not
+		// request scoped.
+		return "", fmt.Errorf("unsupported layer type: cannot resolve blob path: %v", layer)
 	}
 
-	layerURL, err := lh.storageDriver.URLFor(blobPath, map[string]interface{}{"expiry": time.Now().Add(lh.duration)})
+	layerURL, err := lh.storageDriver.URLFor(layerRd.path, map[string]interface{}{"expiry": time.Now().Add(lh.duration)})
 	if err != nil {
 		return "", err
 	}
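For context, URLFor is what lets this handler delegate downloads: drivers that support it (cloud storage backends, for example) return a pre-signed URL the registry can redirect to instead of proxying layer bytes. A sketch of the surrounding usage, assuming an http.ResponseWriter w and *http.Request r from an enclosing handler:

layerURL, err := lh.storageDriver.URLFor(layerRd.path, map[string]interface{}{
	"expiry": time.Now().Add(20 * time.Minute), // illustrative duration
})
if err != nil {
	// The driver cannot produce URLs (or signing failed); a caller would
	// fall back to serving the blob through the registry process.
	return err
}
http.Redirect(w, r, layerURL, http.StatusTemporaryRedirect)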
28 changes: 19 additions & 9 deletions storage/layer_test.go
@@ -31,13 +31,18 @@ func TestSimpleLayerUpload(t *testing.T) {
 	}
 
 	imageName := "foo/bar"
-
+	driver := inmemory.New()
+	pm := &pathMapper{
+		root:    "/storage/testing",
+		version: storagePathVersion,
+	}
 	ls := &layerStore{
-		driver: inmemory.New(),
-		pathMapper: &pathMapper{
-			root:    "/storage/testing",
-			version: storagePathVersion,
+		driver: driver,
+		blobStore: &blobStore{
+			driver: driver,
+			pm:     pm,
 		},
+		pathMapper: pm,
 	}
 
 	h := sha256.New()
@@ -140,12 +145,17 @@ func TestSimpleLayerUpload(t *testing.T) {
 func TestSimpleLayerRead(t *testing.T) {
 	imageName := "foo/bar"
 	driver := inmemory.New()
+	pm := &pathMapper{
+		root:    "/storage/testing",
+		version: storagePathVersion,
+	}
 	ls := &layerStore{
 		driver: driver,
-		pathMapper: &pathMapper{
-			root:    "/storage/testing",
-			version: storagePathVersion,
+		blobStore: &blobStore{
+			driver: driver,
+			pm:     pm,
 		},
+		pathMapper: pm,
 	}
 
 	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
@@ -307,7 +317,7 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper,
 
 	blobDigestSHA := digest.NewDigest("sha256", h)
 
-	blobPath, err := pathMapper.path(blobPathSpec{
+	blobPath, err := pathMapper.path(blobDataPathSpec{
 		digest: dgst,
 	})
 
44 changes: 28 additions & 16 deletions storage/layerstore.go
@@ -12,6 +12,7 @@ import (
 type layerStore struct {
 	driver     storagedriver.StorageDriver
 	pathMapper *pathMapper
+	blobStore  *blobStore
 }
 
 func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) {
@@ -31,31 +32,21 @@ func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) {
 	return true, nil
 }
 
-func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) {
-	blobPath, err := resolveBlobPath(ls.driver, ls.pathMapper, name, digest)
+func (ls *layerStore) Fetch(name string, dgst digest.Digest) (Layer, error) {
+	bp, err := ls.path(name, dgst)
 	if err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
-			return nil, ErrUnknownLayer{manifest.FSLayer{BlobSum: digest}}
-		default:
-			return nil, err
-		}
+		return nil, err
 	}
 
-	fr, err := newFileReader(ls.driver, blobPath)
+	fr, err := newFileReader(ls.driver, bp)
 	if err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
-			return nil, ErrUnknownLayer{manifest.FSLayer{BlobSum: digest}}
-		default:
-			return nil, err
-		}
+		return nil, err
 	}
 
 	return &layerReader{
 		fileReader: *fr,
 		name:       name,
-		digest:     digest,
+		digest:     dgst,
 	}, nil
 }

Expand Down Expand Up @@ -151,3 +142,24 @@ func (ls *layerStore) newLayerUpload(name, uuid, path string, startedAt time.Tim
fileWriter: *fw,
}, nil
}

func (ls *layerStore) path(name string, dgst digest.Digest) (string, error) {
// We must traverse this path through the link to enforce ownership.
layerLinkPath, err := ls.pathMapper.path(layerLinkPathSpec{name: name, digest: dgst})
if err != nil {
return "", err
}

blobPath, err := ls.blobStore.resolve(layerLinkPath)

if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return "", ErrUnknownLayer{manifest.FSLayer{BlobSum: dgst}}
default:
return "", err
}
}

return blobPath, nil
}
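The new ls.path helper is where the refactor's ownership requirement lands: a fetch must pass through a repository-scoped link file before it can reach the shared blob data, so a digest alone is not enough to read another repository's layer. A caller-side sketch of how the failure now surfaces (the repository name, digest variable, and enclosing function are illustrative):

layer, err := ls.Fetch("foo/bar", dgst)
if err != nil {
	if _, ok := err.(ErrUnknownLayer); ok {
		// No link under foo/bar for this digest: the blob may exist
		// globally, but this repository does not own it.
	}
	return nil, err
}
// ... serve the layer ...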
4 changes: 2 additions & 2 deletions storage/layerupload.go
@@ -112,7 +112,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest
 	// sink. Instead, it's read driven. This might be okay.
 
 	// Calculate an updated digest with the latest version.
-	canonical, err := digest.FromReader(tr)
+	canonical, err := digest.FromTarArchive(tr)
 	if err != nil {
 		return "", err
 	}
@@ -128,7 +128,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest
 // identified by dgst. The layer should be validated before commencing the
 // move.
 func (luc *layerUploadController) moveLayer(dgst digest.Digest) error {
-	blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
+	blobPath, err := luc.layerStore.pathMapper.path(blobDataPathSpec{
 		digest: dgst,
 	})
 