Fix CopyObjectPart precondition check when source is ssec encrypted
Move CopyObject precondition checks into GetObjectReader
so that SSE-C precondition checks are performed using the
last 32 bytes of the encrypted ETag rather than the decrypted
ETag.

This also necessitates moving precondition checks for
gateways into the gateway layer rather than the object handler.
poornas authored and vadmeste committed Feb 27, 2019
1 parent ce588d1 commit 5dd4e0f
Showing 14 changed files with 125 additions and 43 deletions.
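To see how the pieces below fit together: the object handler builds an ObjectOptions with a CheckCopyPrecondFn callback, and GetObjectReader invokes that callback once it knows the source object's (possibly encrypted) ETag. A rough sketch of the handler-side wiring follows, assuming the usual handler-scope variables (ctx, w, r, objectAPI, srcBucket, srcObject, readLock) exist; only ObjectOptions, CheckCopyPrecondFn, checkCopyObjectPreconditions and isErrPreconditionFailed come from this diff, the rest is illustrative:

// Hypothetical handler-side wiring; not part of this commit.
getOpts := ObjectOptions{
	// Invoked with the source ObjectInfo and, for SSE-C sources, the
	// backend's encrypted ETag; returning true aborts the copy.
	CheckCopyPrecondFn: func(o ObjectInfo, encETag string) bool {
		return checkCopyObjectPreconditions(ctx, w, r, o, encETag)
	},
}
gr, err := objectAPI.GetObjectNInfo(ctx, srcBucket, srcObject, nil, r.Header, readLock, getOpts)
if err != nil {
	if isErrPreconditionFailed(err) {
		// checkCopyObjectPreconditions has already written the 412 response.
		return
	}
	// Other errors are handled as usual (omitted from this sketch).
	return
}
defer gr.Close()
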
2 changes: 1 addition & 1 deletion cmd/disk-cache.go
@@ -254,7 +254,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,

cleanupBackend := func() { bkReader.Close() }
cleanupPipe := func() { pipeReader.Close() }
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
return gr, nil
}

6 changes: 3 additions & 3 deletions cmd/fs-v1.go
@@ -500,7 +500,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
if hasSuffix(object, slashSeparator) {
// The lock taken above is released when
// objReader.Close() is called by the caller.
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker), nil
}
// Take a rwPool lock for NFS gateway type deployment
rwPoolUnlocker := func() {}
@@ -517,7 +517,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) }
}

objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, nsUnlocker, rwPoolUnlocker)
objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker, rwPoolUnlocker)
if rErr != nil {
return nil, rErr
}
@@ -545,7 +545,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return nil, err
}

return objReaderFn(reader, h, closeFn)
return objReaderFn(reader, h, opts.CheckCopyPrecondFn, closeFn)
}

// GetObject - reads an object from the disk.
5 changes: 4 additions & 1 deletion cmd/gateway/azure/gateway-azure.go
@@ -651,7 +651,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject - reads an object from azure. Supports additional
@@ -829,6 +829,9 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined)
2 changes: 1 addition & 1 deletion cmd/gateway/b2/gateway-b2.go
@@ -417,7 +417,7 @@ func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object from B2. Supports additional
6 changes: 4 additions & 2 deletions cmd/gateway/gcs/gateway-gcs.go
@@ -753,7 +753,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string,
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject - reads an object from GCS. Supports additional
@@ -918,7 +918,9 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
// CopyObject - Copies a blob from source container to destination container.
func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {

if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
src := l.client.Bucket(srcBucket).Object(srcObject)
dst := l.client.Bucket(destBucket).Object(destObject)

7 changes: 5 additions & 2 deletions cmd/gateway/oss/gateway-oss.go
@@ -26,7 +26,7 @@ import (
"strings"

"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/dustin/go-humanize"
humanize "github.com/dustin/go-humanize"

"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
@@ -569,7 +569,7 @@ func (l *ossObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object on OSS. Supports additional
@@ -664,6 +664,9 @@ func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, r *mi

// CopyObject copies an object from source bucket to a destination bucket.
func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
bkt, err := l.Client.Bucket(srcBucket)
if err != nil {
logger.LogIf(ctx, err)
4 changes: 2 additions & 2 deletions cmd/gateway/s3/gateway-s3-sse.go
@@ -313,7 +313,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}
objInfo.UserDefined = minio.CleanMinioInternalMetadataKeys(objInfo.UserDefined)
fn, off, length, err := minio.NewGetObjectReader(rs, objInfo)
fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o.CheckCopyPrecondFn)
if err != nil {
return nil, minio.ErrorRespToObjectError(err)
}
@@ -329,7 +329,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return fn(pr, h, pipeCloser)
return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser)
}

// GetObjectInfo reads object info and replies back ObjectInfo
8 changes: 7 additions & 1 deletion cmd/gateway/s3/gateway-s3.go
@@ -400,7 +400,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object from S3. Supports additional
@@ -463,6 +463,9 @@ func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string,

// CopyObject copies an object from source bucket to a destination bucket.
func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
// Set this header such that following CopyObject() always sets the right metadata on the destination.
// metadata input is already a trickled down value from interpreting x-amz-metadata-directive at
// handler layer. So what we have right now is supposed to be applied on the destination object anyways.
@@ -533,6 +536,9 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
// existing object or a part of it.
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
return minio.PartInfo{}, minio.PreConditionFailed{}
}
srcInfo.UserDefined = map[string]string{
"x-amz-copy-source-if-match": srcInfo.ETag,
}
12 changes: 12 additions & 0 deletions cmd/object-api-errors.go
@@ -383,3 +383,15 @@ func isErrObjectNotFound(err error) bool {
_, ok := err.(ObjectNotFound)
return ok
}

// PreConditionFailed - Check if copy precondition failed
type PreConditionFailed struct{}

func (e PreConditionFailed) Error() string {
return "At least one of the pre-conditions you specified did not hold"
}

func isErrPreconditionFailed(err error) bool {
_, ok := err.(PreConditionFailed)
return ok
}
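
As a minimal illustration of how the new error type is consumed by a caller (hypothetical sketch; the surrounding ObjectLayer call and handler variables are assumed, and in this code base the precondition callback normally writes the 412 response itself before the error surfaces):

// Hypothetical caller-side handling; err comes from an ObjectLayer call
// such as CopyObject or GetObjectNInfo that received a CheckCopyPrecondFn.
if err != nil {
	if isErrPreconditionFailed(err) {
		return // the precondition callback already wrote the 412 response
	}
	// Handle other errors as usual (omitted from this sketch).
}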
4 changes: 4 additions & 0 deletions cmd/object-api-interface.go
@@ -26,10 +26,14 @@ import (
"github.com/minio/minio/pkg/policy"
)

// CheckCopyPreconditionFn returns true if copy precondition check failed.
type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool

// ObjectOptions represents object options for ObjectLayer operations
type ObjectOptions struct {
ServerSideEncryption encrypt.ServerSide
UserDefined map[string]string
CheckCopyPrecondFn CheckCopyPreconditionFn
}

// LockType represents required locking for ObjectLayer operations
56 changes: 45 additions & 11 deletions cmd/object-api-utils.go
@@ -422,30 +422,32 @@ type GetObjectReader struct {
pReader io.Reader

cleanUpFns []func()
precondFn func(ObjectInfo, string) bool
once sync.Once
}

// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, cleanupFns ...func()) *GetObjectReader {
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) *GetObjectReader {
return &GetObjectReader{
ObjInfo: oi,
pReader: r,
cleanUpFns: cleanupFns,
precondFn: pcfn,
}
}

// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)

// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) (
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanUpFns ...func()) (
fn ObjReaderFn, off, length int64, err error) {

// Call the clean-up functions immediately in case of exit
@@ -486,7 +488,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
// a reader that returns the desired range of
// encrypted bytes. The header parameter is used to
// provide encryption parameters.
fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
copySource := h.Get(crypto.SSECopyAlgorithm) != ""

cFns = append(cleanUpFns, cFns...)
@@ -501,9 +503,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
}
return nil, err
}

// Decrypt the ETag before top layer consumes this value.
oi.ETag = getDecryptedETag(h, oi, copySource)
encETag := oi.ETag
oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.

if pcfn != nil {
if ok := pcfn(oi, encETag); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}

// Apply the skipLen and limit on the
// decrypted stream
@@ -514,6 +525,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
ObjInfo: oi,
pReader: decReader,
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
@@ -545,7 +557,17 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
return nil, 0, 0, errInvalidRange
}
}
fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if pcfn != nil {
if ok := pcfn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}
// Decompression reader.
snappyReader := snappy.NewReader(inputReader)
// Apply the skipLen and limit on the
@@ -557,7 +579,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
r = &GetObjectReader{
ObjInfo: oi,
pReader: decReader,
cleanUpFns: append(cleanUpFns, cFns...),
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
@@ -567,11 +590,22 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
if err != nil {
return nil, 0, 0, err
}
fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if pcfn != nil {
if ok := pcfn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}
r = &GetObjectReader{
ObjInfo: oi,
pReader: inputReader,
cleanUpFns: append(cleanUpFns, cFns...),
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
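
For orientation, the two-phase call pattern with the updated signatures, paraphrasing the fs-v1.go hunk earlier in this diff (readObjectRange and the cleanup closure are illustrative stand-ins for backend-specific code, not names from this commit):

// Illustrative backend usage; names not present in this diff are invented.
objReaderFn, off, length, err := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
if err != nil {
	return nil, err
}
// Read the computed range [off, off+length) from the backend.
reader, err := readObjectRange(bucket, object, off, length) // hypothetical helper
if err != nil {
	return nil, err
}
// The returned ObjReaderFn decrypts/decompresses as needed, evaluates the
// precondition callback (against the encrypted ETag for SSE-C objects),
// and wires the cleanup functions into the resulting GetObjectReader.
return objReaderFn(reader, h, opts.CheckCopyPrecondFn, func() { reader.Close() })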
23 changes: 18 additions & 5 deletions cmd/object-handlers-common.go
@@ -23,6 +23,7 @@ import (
"strings"
"time"

"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
)
@@ -33,8 +34,8 @@ import (
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo)
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo, encETag)
}

// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
@@ -43,11 +44,14 @@ func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
// Return false for methods other than PUT (copy requests use PUT).
if r.Method != "PUT" {
return false
}
if encETag == "" {
encETag = objInfo.ETag
}
// If the object doesn't have a modtime (IsZero), or the modtime
// is obviously garbage (Unix time == 0), then ignore modtimes
// and don't process the If-Modified-Since header.
@@ -95,11 +99,16 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
}
}

ssec := crypto.SSECopy.IsRequested(r.Header)
// x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the
// same as the one specified; otherwise return a 412 (precondition failed).
ifMatchETagHeader := r.Header.Get("x-amz-copy-source-if-match")
if ifMatchETagHeader != "" {
if objInfo.ETag != "" && !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
etag := objInfo.ETag
if ssec {
etag = encETag[len(encETag)-32:]
}
if objInfo.ETag != "" && !isETagEqual(etag, ifMatchETagHeader) {
// If the object ETag does not match with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
@@ -111,7 +120,11 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
// one specified otherwise, return a 304 (not modified).
ifNoneMatchETagHeader := r.Header.Get("x-amz-copy-source-if-none-match")
if ifNoneMatchETagHeader != "" {
if objInfo.ETag != "" && isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
etag := objInfo.ETag
if ssec {
etag = encETag[len(encETag)-32:]
}
if objInfo.ETag != "" && isETagEqual(etag, ifNoneMatchETagHeader) {
// If the object ETag matches with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))

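To make the "last 32 bytes" rule above concrete, a small self-contained sketch (the sample ETag value is invented; only the trailing-32-character slice mirrors the check in checkCopyObjectPreconditions):

package main

import "fmt"

// sseCopySourceETag mirrors the slicing above: for an SSE-C copy source the
// x-amz-copy-source-if-match / if-none-match headers are compared against the
// trailing 32 characters of the stored (encrypted) ETag instead of the
// decrypted ETag.
func sseCopySourceETag(encETag string) string {
	if len(encETag) < 32 {
		return encETag
	}
	return encETag[len(encETag)-32:]
}

func main() {
	// Invented value for illustration; a real encrypted ETag is longer than
	// the 32-character MD5-style ETag a client normally sees.
	encETag := "6c7b9d4a1e2f30415263748596a7b8c9d0e1f2a3b4c5d6e7f8091a2b3c4d5e6f"
	// Prints the last 32 characters: d0e1f2a3b4c5d6e7f8091a2b3c4d5e6f
	fmt.Println(sseCopySourceETag(encETag))
}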