Merge pull request #613 from sudo-bmitch/pr-blob-reg-tests
Add blob tests
sudo-bmitch committed Nov 22, 2023
2 parents e749823 + f5f7e77 commit 1a8e8e1
Showing 3 changed files with 157 additions and 4 deletions.
blob.go (5 changes: 4 additions & 1 deletion)
@@ -236,9 +236,12 @@ func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.R
}

// BlobPut uploads a blob to a repository.
// Descriptor is optional; leave size and digest as zero values if unknown.
// Reader must also be an [io.Seeker] to support the chunked upload fallback.
//
// This will attempt an anonymous blob mount first which some registries may support.
// It will then try doing a full put of the blob without chunking (most widely supported).
// If the full put fails, it will fall back to a chunked upload (useful for flaky networks) if the reader is also an [io.Seeker].
// If the full put fails, it will fall back to a chunked upload (useful for flaky networks).
func (rc *RegClient) BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr io.Reader) (types.Descriptor, error) {
if !r.IsSetRepo() {
return types.Descriptor{}, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), types.ErrInvalidReference)
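
For illustration, a minimal sketch of calling the updated BlobPut through the public RegClient API with an unknown digest and size; the registry host, repository name, and blob contents are hypothetical, and the import paths are assumed to match the module layout at the time of this commit. A bytes.Reader also satisfies the [io.Seeker] requirement, so the chunked fallback remains available.

package main

import (
    "bytes"
    "context"
    "fmt"

    "github.com/regclient/regclient"
    "github.com/regclient/regclient/types"
    "github.com/regclient/regclient/types/ref"
)

func main() {
    ctx := context.Background()
    rc := regclient.New()
    // hypothetical registry and repository
    r, err := ref.New("registry.example.org/proj/repo")
    if err != nil {
        panic(err)
    }
    blob := []byte("example blob contents")
    // bytes.Reader implements io.Seeker, keeping the chunked upload fallback available
    rdr := bytes.NewReader(blob)
    // digest and size are unknown, so a zero value Descriptor is passed
    d, err := rc.BlobPut(ctx, r, types.Descriptor{}, rdr)
    if err != nil {
        panic(err)
    }
    fmt.Printf("pushed blob %s (%d bytes)\n", d.Digest, d.Size)
}
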
scheme/reg/blob.go (3 changes: 3 additions & 0 deletions)
@@ -171,6 +171,9 @@ func (reg *Reg) BlobMount(ctx context.Context, rSrc ref.Ref, rTgt ref.Ref, d typ
}

// BlobPut uploads a blob to a repository.
// Descriptor is optional; leave size and digest as zero values if unknown.
// Reader must also be an [io.Seeker] to support the chunked upload fallback.
//
// This will attempt an anonymous blob mount first which some registries may support.
// It will then try doing a full put of the blob without chunking (most widely supported).
// If the full put fails, it will fall back to a chunked upload (useful for flaky networks).
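
The chunked upload fallback mentioned above follows the registry push flow that the test mocks in blob_test.go exercise: a POST opens an upload session, one or more PATCH requests carry the data with a Content-Range header, and a final PUT with the digest as a query parameter closes the upload. Below is a rough, single-chunk sketch of that wire-level sequence; it is not regclient's internal implementation, and the helper name, URLs, and non-empty-blob assumption are illustrative only.

package blobpush

import (
    "bytes"
    "fmt"
    "io"
    "net/http"

    "github.com/opencontainers/go-digest"
)

// pushBlobChunked sketches the POST/PATCH/PUT sequence for a chunked blob
// upload, sending the whole (non-empty) blob as a single chunk for simplicity.
// regURL is e.g. "http://registry.example.org" and repo is e.g. "proj/repo".
func pushBlobChunked(client *http.Client, regURL, repo string, blob []byte) error {
    // 1. POST opens an upload session; the Location header points at the upload URL.
    resp, err := client.Post(regURL+"/v2/"+repo+"/blobs/uploads/", "application/octet-stream", nil)
    if err != nil {
        return err
    }
    resp.Body.Close()
    loc, err := resp.Location()
    if err != nil {
        return err
    }
    // 2. PATCH uploads the chunk along with its byte range.
    req, err := http.NewRequest(http.MethodPatch, loc.String(), bytes.NewReader(blob))
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", 0, len(blob)-1))
    resp, err = client.Do(req)
    if err != nil {
        return err
    }
    io.Copy(io.Discard, resp.Body)
    resp.Body.Close()
    if loc, err = resp.Location(); err != nil {
        return err
    }
    // 3. PUT with the digest query parameter finalizes the upload.
    q := loc.Query()
    q.Set("digest", digest.Canonical.FromBytes(blob).String())
    loc.RawQuery = q.Encode()
    req, err = http.NewRequest(http.MethodPut, loc.String(), nil)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    resp, err = client.Do(req)
    if err != nil {
        return err
    }
    resp.Body.Close()
    if resp.StatusCode != http.StatusCreated {
        return fmt.Errorf("unexpected status %d finalizing blob upload", resp.StatusCode)
    }
    return nil
}
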
scheme/reg/blob_test.go (153 changes: 150 additions & 3 deletions)
@@ -365,6 +365,8 @@ func TestBlobGet(t *testing.T) {
func TestBlobPut(t *testing.T) {
t.Parallel()
blobRepo := "/proj/repo"
blobRepo5 := "/proj/repo5"
blobRepo6 := "/proj/repo6"
// privateRepo := "/proj/private"
ctx := context.Background()
// include a random blob
@@ -374,6 +376,7 @@ func TestBlobPut(t *testing.T) {
blobLen := 1024 // must be blobChunk < blobLen <= blobChunk * 2
blobLen3 := 1000 // blob without a full final chunk
blobLen4 := 2048 // must be blobChunk < blobLen <= blobChunk * 2
blobLen5 := 500 // single chunk
d1, blob1 := reqresp.NewRandomBlob(blobLen, seed)
uuid1 := uuid.New()
d2, blob2 := reqresp.NewRandomBlob(blobLen, seed+1)
@@ -382,6 +385,11 @@ func TestBlobPut(t *testing.T) {
uuid3 := uuid.New()
d4, blob4 := reqresp.NewRandomBlob(blobLen4, seed+3)
uuid4 := uuid.New()
d5, blob5 := reqresp.NewRandomBlob(blobLen5, seed+4)
uuid5 := uuid.New()
blob6 := []byte{}
d6 := digest.Canonical.FromBytes(blob6)
uuid6 := uuid.New()
// dMissing := digest.FromBytes([]byte("missing"))
user := "testing"
pass := "password"
@@ -874,6 +882,108 @@ func TestBlobPut(t *testing.T) {
},
},
},
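// The following entries mock the two new cases tested below: d5 (pushed without
// a descriptor, served via the POST + PATCH + PUT chunked flow) and d6 (a
// zero-length blob, served via a single POST + PUT).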
// get upload5 location
{
ReqEntry: reqresp.ReqEntry{
Name: "POST for d5",
Method: "POST",
Path: "/v2" + blobRepo5 + "/blobs/uploads/",
},
RespEntry: reqresp.RespEntry{
Status: http.StatusAccepted,
Headers: http.Header{
"Content-Length": {"0"},
"Location": {uuid5.String()},
},
},
},
// upload put for d5
{
ReqEntry: reqresp.ReqEntry{
DelOnUse: false,
Name: "PUT for chunked d5",
Method: "PUT",
Path: "/v2" + blobRepo5 + "/blobs/uploads/" + uuid5.String(),
Query: map[string][]string{
"digest": {d5.String()},
"chunk": {"1"},
},
Headers: http.Header{
"Content-Length": {"0"},
"Content-Type": {"application/octet-stream"},
},
},
RespEntry: reqresp.RespEntry{
Status: http.StatusCreated,
Headers: http.Header{
"Content-Length": {"0"},
"Location": {"/v2" + blobRepo5 + "/blobs/" + d5.String()},
"Docker-Content-Digest": {d5.String()},
},
},
},
// upload patch d5
{
ReqEntry: reqresp.ReqEntry{
DelOnUse: false,
Name: "PATCH for d5",
Method: "PATCH",
Path: "/v2" + blobRepo5 + "/blobs/uploads/" + uuid5.String(),
Headers: http.Header{
"Content-Length": {fmt.Sprintf("%d", blobLen5)},
"Content-Range": {fmt.Sprintf("%d-%d", 0, blobLen5-1)},
"Content-Type": {"application/octet-stream"},
},
Body: blob5,
},
RespEntry: reqresp.RespEntry{
Status: http.StatusAccepted,
Headers: http.Header{
"Content-Length": {fmt.Sprintf("%d", 0)},
"Range": {fmt.Sprintf("bytes=0-%d", blobLen5-1)},
"Location": {uuid5.String() + "?chunk=1"},
},
},
},
// get upload6 location
{
ReqEntry: reqresp.ReqEntry{
Name: "POST for d6",
Method: "POST",
Path: "/v2" + blobRepo6 + "/blobs/uploads/",
},
RespEntry: reqresp.RespEntry{
Status: http.StatusAccepted,
Headers: http.Header{
"Content-Length": {"0"},
"Location": {uuid6.String()},
},
},
},
// upload put for d6
{
ReqEntry: reqresp.ReqEntry{
DelOnUse: false,
Name: "PUT for d6",
Method: "PUT",
Path: "/v2" + blobRepo6 + "/blobs/uploads/" + uuid6.String(),
Query: map[string][]string{
"digest": {d6.String()},
},
Headers: http.Header{
"Content-Length": {"0"},
"Content-Type": {"application/octet-stream"},
},
},
RespEntry: reqresp.RespEntry{
Status: http.StatusCreated,
Headers: http.Header{
"Content-Length": {"0"},
"Location": {"/v2" + blobRepo6 + "/blobs/" + d6.String()},
"Docker-Content-Digest": {d6.String()},
},
},
},
}
rrs = append(rrs, reqresp.BaseEntries...)
// create a server
@@ -941,7 +1051,6 @@ func TestBlobPut(t *testing.T) {
if dp.Size != int64(len(blob1)) {
t.Errorf("Content length mismatch, expected %d, received %d", len(blob1), dp.Size)
}

})

t.Run("Retry", func(t *testing.T) {
@@ -961,7 +1070,6 @@ func TestBlobPut(t *testing.T) {
if dp.Size != int64(len(blob2)) {
t.Errorf("Content length mismatch, expected %d, received %d", len(blob2), dp.Size)
}

})

t.Run("PartialChunk", func(t *testing.T) {
@@ -981,7 +1089,6 @@ func TestBlobPut(t *testing.T) {
if dp.Size != int64(len(blob3)) {
t.Errorf("Content length mismatch, expected %d, received %d", len(blob3), dp.Size)
}

})

t.Run("Chunk resized", func(t *testing.T) {
@@ -1003,5 +1110,45 @@ func TestBlobPut(t *testing.T) {
}
})

// test put without a descriptor
t.Run("No descriptor", func(t *testing.T) {
r, err := ref.New(tsURL.Host + blobRepo5)
if err != nil {
t.Errorf("Failed creating ref: %v", err)
}
br := bytes.NewReader(blob5)
dp, err := reg.BlobPut(ctx, r, types.Descriptor{}, br)
if err != nil {
t.Errorf("Failed running BlobPut: %v", err)
return
}
if dp.Digest.String() != d5.String() {
t.Errorf("Digest mismatch, expected %s, received %s", d5.String(), dp.Digest.String())
}
if dp.Size != int64(len(blob5)) {
t.Errorf("Content length mismatch, expected %d, received %d", len(blob5), dp.Size)
}
})

// test put of a zero length blob
t.Run("Empty blob", func(t *testing.T) {
r, err := ref.New(tsURL.Host + blobRepo6)
if err != nil {
t.Errorf("Failed creating ref: %v", err)
}
br := bytes.NewReader(blob6)
dp, err := reg.BlobPut(ctx, r, types.Descriptor{Digest: d6, Size: int64(len(blob6))}, br)
if err != nil {
t.Errorf("Failed running BlobPut: %v", err)
return
}
if dp.Digest.String() != d6.String() {
t.Errorf("Digest mismatch, expected %s, received %s", d6.String(), dp.Digest.String())
}
if dp.Size != int64(len(blob6)) {
t.Errorf("Content length mismatch, expected %d, received %d", len(blob6), dp.Size)
}
})

// TODO: test failed mount (blobGetUploadURL)
}
