Skip to content

Commit

Permalink
Move CopyObject precondition checks into GetObjectReader
Browse files Browse the repository at this point in the history
in order to perform SSE-C pre-condition checks using the
last 32 bytes of encrypted ETag rather than the decrypted
ETag
  • Loading branch information
poornas authored and vadmeste committed Feb 26, 2019
1 parent 15210cb commit 693af64
Show file tree
Hide file tree
Showing 15 changed files with 98 additions and 43 deletions.
2 changes: 1 addition & 1 deletion cmd/disk-cache.go
Expand Up @@ -254,7 +254,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,

cleanupBackend := func() { bkReader.Close() }
cleanupPipe := func() { pipeReader.Close() }
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
return gr, nil
}

Expand Down
1 change: 0 additions & 1 deletion cmd/encryption-v1.go
Expand Up @@ -915,7 +915,6 @@ func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool)
key [32]byte
err error
)

// If ETag is contentMD5Sum return it as is.
if len(objInfo.ETag) == 32 {
return objInfo.ETag
Expand Down
6 changes: 3 additions & 3 deletions cmd/fs-v1.go
Expand Up @@ -494,7 +494,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
if hasSuffix(object, slashSeparator) {
// The lock taken above is released when
// objReader.Close() is called by the caller.
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), ObjectInfo{}, nsUnlocker), nil
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), ObjectInfo{}, opts.CheckCopyPrecondFn, nsUnlocker), nil
}

// Otherwise we get the object info
Expand All @@ -519,7 +519,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) }
}

objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, nsUnlocker, rwPoolUnlocker)
objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker, rwPoolUnlocker)
if rErr != nil {
return nil, rErr
}
Expand Down Expand Up @@ -547,7 +547,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return nil, err
}

return objReaderFn(reader, h, closeFn)
return objReaderFn(reader, h, opts.CheckCopyPrecondFn, closeFn)
}

// GetObject - reads an object from the disk.
Expand Down
2 changes: 1 addition & 1 deletion cmd/gateway/azure/gateway-azure.go
Expand Up @@ -651,7 +651,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject - reads an object from azure. Supports additional
Expand Down
2 changes: 1 addition & 1 deletion cmd/gateway/b2/gateway-b2.go
Expand Up @@ -417,7 +417,7 @@ func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object from B2. Supports additional
Expand Down
2 changes: 1 addition & 1 deletion cmd/gateway/gcs/gateway-gcs.go
Expand Up @@ -753,7 +753,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string,
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject - reads an object from GCS. Supports additional
Expand Down
4 changes: 2 additions & 2 deletions cmd/gateway/oss/gateway-oss.go
Expand Up @@ -26,7 +26,7 @@ import (
"strings"

"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/dustin/go-humanize"
humanize "github.com/dustin/go-humanize"

"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
Expand Down Expand Up @@ -569,7 +569,7 @@ func (l *ossObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object on OSS. Supports additional
Expand Down
4 changes: 2 additions & 2 deletions cmd/gateway/s3/gateway-s3-sse.go
Expand Up @@ -313,7 +313,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}
objInfo.UserDefined = minio.CleanMinioInternalMetadataKeys(objInfo.UserDefined)
fn, off, length, err := minio.NewGetObjectReader(rs, objInfo)
fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o.CheckCopyPrecondFn)
if err != nil {
return nil, minio.ErrorRespToObjectError(err)
}
Expand All @@ -329,7 +329,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return fn(pr, h, pipeCloser)
return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser)
}

// GetObjectInfo reads object info and replies back ObjectInfo
Expand Down
2 changes: 1 addition & 1 deletion cmd/gateway/s3/gateway-s3.go
Expand Up @@ -400,7 +400,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser), nil
}

// GetObject reads an object from S3. Supports additional
Expand Down
12 changes: 12 additions & 0 deletions cmd/object-api-errors.go
Expand Up @@ -383,3 +383,15 @@ func isErrObjectNotFound(err error) bool {
_, ok := err.(ObjectNotFound)
return ok
}

// PreConditionFailed - Check if copy precondition failed
type PreConditionFailed struct{}

func (e PreConditionFailed) Error() string {
return "At least one of the pre-conditions you specified did not hold"
}

func isErrPreconditionFailed(err error) bool {
_, ok := err.(PreConditionFailed)
return ok
}
4 changes: 4 additions & 0 deletions cmd/object-api-interface.go
Expand Up @@ -26,10 +26,14 @@ import (
"github.com/minio/minio/pkg/policy"
)

// CheckCopyPreconditionFn returns true if copy precondition check failed.
// It is called with the source object's info and, when the object is SSE
// encrypted, the encrypted ETag (empty otherwise); a true return causes
// the read to be aborted with a PreConditionFailed error. A nil function
// skips the check entirely.
type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool

// ObjectOptions represents object options for ObjectLayer operations
type ObjectOptions struct {
	ServerSideEncryption encrypt.ServerSide      // server-side encryption parameters for the operation
	UserDefined          map[string]string       // user-defined object metadata
	CheckCopyPrecondFn   CheckCopyPreconditionFn // optional CopyObject precondition check; nil when not copying
}

// LockType represents required locking for ObjectLayer operations
Expand Down
56 changes: 45 additions & 11 deletions cmd/object-api-utils.go
Expand Up @@ -422,30 +422,32 @@ type GetObjectReader struct {
pReader io.Reader

cleanUpFns []func()
precondFn func(ObjectInfo, string) bool
once sync.Once
}

// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, cleanupFns ...func()) *GetObjectReader {
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) *GetObjectReader {
return &GetObjectReader{
ObjInfo: oi,
pReader: r,
cleanUpFns: cleanupFns,
precondFn: pcfn,
}
}

// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)

// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) (
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanUpFns ...func()) (
fn ObjReaderFn, off, length int64, err error) {

// Call the clean-up functions immediately in case of exit
Expand Down Expand Up @@ -486,7 +488,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
// a reader that returns the desired range of
// encrypted bytes. The header parameter is used to
// provide encryption parameters.
fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
copySource := h.Get(crypto.SSECopyAlgorithm) != ""

cFns = append(cleanUpFns, cFns...)
Expand All @@ -501,9 +503,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
}
return nil, err
}

// Decrypt the ETag before top layer consumes this value.
oi.ETag = getDecryptedETag(h, oi, copySource)
encETag := oi.ETag
oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.

if pcfn != nil {
if ok := pcfn(oi, encETag); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}

// Apply the skipLen and limit on the
// decrypted stream
Expand All @@ -514,6 +525,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
ObjInfo: oi,
pReader: decReader,
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
Expand Down Expand Up @@ -545,7 +557,17 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
return nil, 0, 0, errInvalidRange
}
}
fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if pcfn != nil {
if ok := pcfn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}
// Decompression reader.
snappyReader := snappy.NewReader(inputReader)
// Apply the skipLen and limit on the
Expand All @@ -557,7 +579,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
r = &GetObjectReader{
ObjInfo: oi,
pReader: decReader,
cleanUpFns: append(cleanUpFns, cFns...),
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
Expand All @@ -567,11 +590,22 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
if err != nil {
return nil, 0, 0, err
}
fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if pcfn != nil {
if ok := pcfn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
}
r = &GetObjectReader{
ObjInfo: oi,
pReader: inputReader,
cleanUpFns: append(cleanUpFns, cFns...),
cleanUpFns: cFns,
precondFn: pcfn,
}
return r, nil
}
Expand Down
13 changes: 8 additions & 5 deletions cmd/object-handlers-common.go
Expand Up @@ -23,6 +23,7 @@ import (
"strings"
"time"

"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
)
Expand All @@ -33,8 +34,8 @@ import (
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo)
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo, encETag)
}

// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
Expand All @@ -43,7 +44,7 @@ func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
// Return false for methods other than GET and HEAD.
if r.Method != "PUT" {
return false
Expand Down Expand Up @@ -95,11 +96,13 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
}
}

ssec := crypto.SSECopy.IsRequested(r.Header)

// x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the
// same as the one specified; otherwise return a 412 (precondition failed).
ifMatchETagHeader := r.Header.Get("x-amz-copy-source-if-match")
if ifMatchETagHeader != "" {
if objInfo.ETag != "" && !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
if objInfo.ETag != "" && (!isETagEqual(objInfo.ETag, ifMatchETagHeader) || (ssec && !isETagEqual(encETag[len(encETag)-32:], ifMatchETagHeader))) {
// If the object ETag does not match with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
Expand All @@ -111,7 +114,7 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
// one specified otherwise, return a 304 (not modified).
ifNoneMatchETagHeader := r.Header.Get("x-amz-copy-source-if-none-match")
if ifNoneMatchETagHeader != "" {
if objInfo.ETag != "" && isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
if objInfo.ETag != "" && (isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) || (ssec && isETagEqual(encETag[len(encETag)-32:], ifNoneMatchETagHeader))) {
// If the object ETag matches with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
Expand Down
25 changes: 14 additions & 11 deletions cmd/object-handlers.go
Expand Up @@ -761,21 +761,22 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
if !cpSrcDstSame {
lock = readLock
}

checkCopyPrecondFn := func(o ObjectInfo, encETag string) bool {
return checkCopyObjectPreconditions(ctx, w, r, o, encETag)
}
getOpts.CheckCopyPrecondFn = checkCopyPrecondFn
var rs *HTTPRangeSpec
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
defer gr.Close()
srcInfo := gr.ObjInfo

// Verify before x-amz-copy-source preconditions before continuing with CopyObject.
if checkCopyObjectPreconditions(ctx, w, r, srcInfo) {
return
}

/// maximum Upload size for object in a single CopyObject operation.
if isMaxObjectSize(srcInfo.Size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
Expand Down Expand Up @@ -1597,9 +1598,16 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

}
}
checkCopyPartPrecondFn := func(o ObjectInfo, encETag string) bool {
return checkCopyObjectPartPreconditions(ctx, w, r, o, encETag)
}
getOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn

gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
Expand All @@ -1621,11 +1629,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}

// Verify before x-amz-copy-source preconditions before continuing with CopyObject.
if checkCopyObjectPartPreconditions(ctx, w, r, srcInfo) {
return
}

// Get the object offset & length
startOffset, length, err := rs.GetOffsetLength(actualPartSize)
if err != nil {
Expand Down

0 comments on commit 693af64

Please sign in to comment.