add encrypt option to cp, mirror, pipe, stat, cat and rm commands (#2400)
poornas authored and nitisht committed Mar 8, 2018
1 parent 4758556 commit 0ede95b
Showing 25 changed files with 659 additions and 125 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -13,6 +13,7 @@ share Generate URL for sharing.
cp Copy files and objects.
mirror Mirror buckets and folders.
find Finds files which match the given set of parameters.
stat Stat contents of objects and folders.
diff List objects with size difference or missing between two folders or buckets.
rm Remove files and objects.
events Manage object notifications.
26 changes: 21 additions & 5 deletions cmd/cat-main.go
@@ -34,7 +34,12 @@ import (
)

var (
catFlags = []cli.Flag{}
catFlags = []cli.Flag{
cli.StringFlag{
Name: "encrypt-key",
Usage: "Decrypt object (using server-side encryption)",
},
}
)

// Display contents of a file.
@@ -53,6 +58,9 @@ USAGE:
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}
ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
EXAMPLES:
1. Stream an object from Amazon S3 cloud storage to mplayer standard input.
$ {{.HelpName}} s3/ferenginar/klingon_opera_aktuh_maylotah.ogg | mplayer -
@@ -62,7 +70,9 @@ EXAMPLES:
3. Concatenate multiple files to one.
$ {{.HelpName}} part.* > complete.img
4. Stream a server encrypted object from Amazon S3 cloud storage to standard output.
$ {{.HelpName}} --encrypt-key 's3/ferenginar=32byteslongsecretkeymustbegiven1' s3/ferenginar/klingon_opera_aktuh_maylotah.ogg
`,
}

@@ -129,7 +139,7 @@ func checkCatSyntax(ctx *cli.Context) {
}

// catURL displays contents of a URL to stdout.
func catURL(sourceURL string) *probe.Error {
func catURL(sourceURL string, encKeyDB map[string][]prefixSSEPair) *probe.Error {
var reader io.Reader
size := int64(-1)
switch sourceURL {
@@ -146,7 +156,7 @@ func catURL(sourceURL string) *probe.Error {
if err == nil && client.GetURL().Type == objectStorage {
size = content.Size
}
if reader, err = getSourceStreamFromURL(sourceURL); err != nil {
if reader, err = getSourceStreamFromURL(sourceURL, encKeyDB); err != nil {
return err.Trace(sourceURL)
}
}
@@ -227,10 +237,16 @@ func mainCat(ctx *cli.Context) error {
}
}
}
sseKeys := os.Getenv("MC_ENCRYPT_KEY")
if key := ctx.String("encrypt-key"); key != "" {
sseKeys = key
}

encKeyDB, err := parseEncryptionKeys(sseKeys)
fatalIf(err, "Unable to parse encryption keys")
// Convert arguments to URLs: expand alias, fix format.
for _, url := range args {
fatalIf(catURL(url).Trace(url), "Unable to read from `"+url+"`.")
fatalIf(catURL(url, encKeyDB).Trace(url), "Unable to read from `"+url+"`.")
}
return nil
}
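The cat change above reads the SSE keys either from the MC_ENCRYPT_KEY environment variable or the --encrypt-key flag and hands the comma-delimited prefix=secret list to parseEncryptionKeys. That helper is not part of this excerpt, so the sketch below shows one way such a list could be parsed into the alias-keyed map the commands consume; the prefixSSEPair field names, the 32-byte length check, and the alias grouping are assumptions, not mc's actual implementation.

```go
// Minimal sketch, not mc's actual parseEncryptionKeys (which lives outside this
// excerpt): shows how a "prefix1=secret1,prefix2=secret2" list could be parsed.
// Field names, validation, and map layout are assumptions.
package main

import (
	"fmt"
	"log"
	"strings"
)

type prefixSSEPair struct {
	Prefix string // alias/bucket/prefix the key applies to
	Key    string // 32-byte SSE-C secret
}

func parseEncryptionKeysSketch(sseKeys string) (map[string][]prefixSSEPair, error) {
	encMap := make(map[string][]prefixSSEPair)
	if sseKeys == "" {
		return encMap, nil
	}
	for _, entry := range strings.Split(sseKeys, ",") {
		parts := strings.SplitN(entry, "=", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid prefix=secret entry %q", entry)
		}
		prefix, secret := parts[0], parts[1]
		if len(secret) != 32 {
			return nil, fmt.Errorf("encryption key for %q must be 32 bytes, got %d", prefix, len(secret))
		}
		// Group by alias (first path component) so lookups can start from the
		// aliased URL, e.g. "s3/ferenginar" groups under "s3".
		alias := strings.SplitN(prefix, "/", 2)[0]
		encMap[alias] = append(encMap[alias], prefixSSEPair{Prefix: prefix, Key: secret})
	}
	return encMap, nil
}

func main() {
	db, err := parseEncryptionKeysSketch("s3/ferenginar=32byteslongsecretkeymustbegiven1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", db)
}
```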
8 changes: 4 additions & 4 deletions cmd/client-fs.go
@@ -315,7 +315,7 @@ func (f *fsClient) put(reader io.Reader, size int64, metadata map[string][]strin
}

// Put - create a new file with metadata.
func (f *fsClient) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader) (int64, *probe.Error) {
func (f *fsClient) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (int64, *probe.Error) {
return f.put(reader, size, nil, progress)
}

@@ -368,7 +368,7 @@ func createFile(fpath string) (io.WriteCloser, error) {
}

// Copy - copy data from source to destination
func (f *fsClient) Copy(source string, size int64, progress io.Reader) *probe.Error {
func (f *fsClient) Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error {
// Don't use f.Get() f.Put() directly. Instead use readFile and createFile
destination := f.PathURL.Path
if destination == source { // Cannot copy file into itself
@@ -434,7 +434,7 @@ func (f *fsClient) get() (io.Reader, *probe.Error) {
}

// Get returns reader and any additional metadata.
func (f *fsClient) Get() (io.Reader, *probe.Error) {
func (f *fsClient) Get(sseKey string) (io.Reader, *probe.Error) {
return f.get()
}

@@ -968,7 +968,7 @@ func (f *fsClient) SetAccess(access string) *probe.Error {
}

// Stat - get metadata from path.
func (f *fsClient) Stat(isIncomplete, isFetchMeta bool) (content *clientContent, err *probe.Error) {
func (f *fsClient) Stat(isIncomplete, isFetchMeta bool, sseKey string) (content *clientContent, err *probe.Error) {
st, err := f.fsStat(isIncomplete)
if err != nil {
return nil, err.Trace(f.PathURL.String())
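Both clients change in lockstep, so the shared client interface (not shown in this excerpt) presumably gains the same SSE parameters. The sketch below summarizes the affected method signatures as they appear in the fsClient and s3Client hunks; the interface name, the placeholder ObjectInfo struct, and the use of plain error instead of mc's *probe.Error are assumptions made to keep the sketch self-contained.

```go
// Sketch of the SSE-aware client surface implied by this change. The real
// interface lives outside this excerpt; names and return types here are
// stand-ins (plain error instead of *probe.Error, ObjectInfo for clientContent).
package main

import (
	"context"
	"io"
)

// ObjectInfo stands in for mc's clientContent metadata struct.
type ObjectInfo struct {
	Size int64
}

// Client mirrors the signatures visible in the fsClient and s3Client diffs.
type Client interface {
	// Get downloads an object, decrypting with sseKey when it is non-empty.
	Get(sseKey string) (io.Reader, error)
	// Put uploads an object, encrypting with sseKey when it is non-empty.
	Put(ctx context.Context, reader io.Reader, size int64,
		metadata map[string]string, progress io.Reader, sseKey string) (int64, error)
	// Copy performs a server-side copy; source and target may use different keys.
	Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) error
	// Stat issues a HEAD request; encrypted objects need the key for metadata too.
	Stat(isIncomplete, isFetchMeta bool, sseKey string) (*ObjectInfo, error)
}

func main() {
	var _ Client // nothing to run; this exists only to typecheck the sketch
}
```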
28 changes: 14 additions & 14 deletions cmd/client-fs_test.go
@@ -45,7 +45,7 @@ func (s *TestSuite) TestList(c *C) {
var n int64
n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

@@ -56,7 +56,7 @@ func (s *TestSuite) TestList(c *C) {
reader = bytes.NewReader([]byte(data))
n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

@@ -84,7 +84,7 @@ func (s *TestSuite) TestList(c *C) {
reader = bytes.NewReader([]byte(data))
n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

@@ -144,7 +144,7 @@ func (s *TestSuite) TestList(c *C) {
reader = bytes.NewReader([]byte(data))
n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

@@ -210,7 +210,7 @@ func (s *TestSuite) TestStatBucket(c *C) {
c.Assert(err, IsNil)
err = fsClient.MakeBucket("us-east-1", true)
c.Assert(err, IsNil)
_, err = fsClient.Stat(false, false)
_, err = fsClient.Stat(false, false, "")
c.Assert(err, IsNil)
}

@@ -251,7 +251,7 @@ func (s *TestSuite) TestPut(c *C) {
var n int64
n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))
}
@@ -271,11 +271,11 @@ func (s *TestSuite) TestGet(c *C) {
reader = bytes.NewReader([]byte(data))
n, err := fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

reader, err = fsClient.Get()
reader, err = fsClient.Get("")
c.Assert(err, IsNil)
var results bytes.Buffer
_, e = io.Copy(&results, reader)
@@ -299,11 +299,11 @@ func (s *TestSuite) TestGetRange(c *C) {
reader = bytes.NewReader([]byte(data))
n, err := fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

reader, err = fsClient.Get()
reader, err = fsClient.Get("")
c.Assert(err, IsNil)
var results bytes.Buffer
buf := make([]byte, 5)
@@ -330,11 +330,11 @@ func (s *TestSuite) TestStatObject(c *C) {
reader := bytes.NewReader([]byte(data))
n, err := fsClient.Put(context.Background(), reader, int64(dataLen), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

content, err := fsClient.Stat(false, false)
content, err := fsClient.Stat(false, false, "")
c.Assert(err, IsNil)
c.Assert(content.Size, Equals, int64(dataLen))
}
@@ -356,10 +356,10 @@ func (s *TestSuite) TestCopy(c *C) {
reader = bytes.NewReader([]byte(data))
n, err := fsClientSource.Put(context.Background(), reader, int64(len(data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(data)))

err = fsClientTarget.Copy(sourcePath, int64(len(data)), nil)
err = fsClientTarget.Copy(sourcePath, int64(len(data)), nil, "", "")
c.Assert(err, IsNil)
}
59 changes: 43 additions & 16 deletions cmd/client-s3.go
@@ -19,6 +19,7 @@ package cmd
import (
"context"
"crypto/tls"
"encoding/base64"
"hash/fnv"
"io"
"net"
@@ -506,9 +507,16 @@ func (c *s3Client) Watch(params watchParams) (*watchObject, *probe.Error) {
}

// Get - get object with metadata.
func (c *s3Client) Get() (io.Reader, *probe.Error) {
func (c *s3Client) Get(sseKey string) (io.Reader, *probe.Error) {
bucket, object := c.url2BucketAndObject()
reader, e := c.api.GetObject(bucket, object, minio.GetObjectOptions{})
var opts minio.GetObjectOptions
if sseKey != "" {
key := minio.NewSSEInfo([]byte(sseKey), "AES256")
for k, v := range key.GetSSEHeaders() {
opts.Set(k, v)
}
}
reader, e := c.api.GetObject(bucket, object, opts)
if e != nil {
errResponse := minio.ToErrorResponse(e)
if errResponse.Code == "NoSuchBucket" {
@@ -530,19 +538,29 @@ func (c *s3Client) Get(sseKey string) (io.Reader, *probe.Error) {
}

// Copy - copy object
func (c *s3Client) Copy(source string, size int64, progress io.Reader) *probe.Error {
func (c *s3Client) Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error {
dstBucket, dstObject := c.url2BucketAndObject()
if dstBucket == "" {
return probe.NewError(BucketNameEmpty{})
}

tokens := splitStr(source, string(c.targetURL.Separator), 3)

var srcKeyPtr, tgtKeyPtr *minio.SSEInfo
if srcSSEKey != "" {
srcKey := minio.NewSSEInfo([]byte(srcSSEKey), "AES256")
srcKeyPtr = &srcKey
}
if tgtSSEKey != "" {
tgtKey := minio.NewSSEInfo([]byte(tgtSSEKey), "AES256")
tgtKeyPtr = &tgtKey
}

// Source object
src := minio.NewSourceInfo(tokens[1], tokens[2], nil)
src := minio.NewSourceInfo(tokens[1], tokens[2], srcKeyPtr)

// Destination object
dst, e := minio.NewDestinationInfo(dstBucket, dstObject, nil, nil)
dst, e := minio.NewDestinationInfo(dstBucket, dstObject, tgtKeyPtr, nil)
if e != nil {
return probe.NewError(e)
}
@@ -578,7 +596,7 @@ func (c *s3Client) Copy(source string, size int64, progress io.Reader) *probe.Er
}

// Put - upload an object with custom metadata.
func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader) (int64, *probe.Error) {
func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (int64, *probe.Error) {
bucket, object := c.url2BucketAndObject()
contentType, ok := metadata["Content-Type"]
if ok {
@@ -612,6 +630,11 @@ func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metada
if ok {
delete(metadata, "X-Amz-Storage-Class")
}
if sseKey != "" {
metadata["x-amz-server-side-encryption-customer-algorithm"] = "AES256"
metadata["x-amz-server-side-encryption-customer-key"] = base64.StdEncoding.EncodeToString([]byte(sseKey))
metadata["x-amz-server-side-encryption-customer-key-MD5"] = sumMD5Base64([]byte(sseKey))
}
if bucket == "" {
return 0, probe.NewError(BucketNameEmpty{})
}
@@ -859,7 +882,7 @@ func (c *s3Client) listObjectWrapper(bucket, object string, isRecursive bool, do
}

// Stat - send a 'HEAD' on a bucket or object to fetch its metadata.
func (c *s3Client) Stat(isIncomplete, isFetchMeta bool) (*clientContent, *probe.Error) {
func (c *s3Client) Stat(isIncomplete, isFetchMeta bool, sseKey string) (*clientContent, *probe.Error) {
c.mutex.Lock()
defer c.mutex.Unlock()
bucket, object := c.url2BucketAndObject()
@@ -915,7 +938,12 @@ func (c *s3Client) Stat(isIncomplete, isFetchMeta bool) (*clientContent, *probe.
}
return nil, probe.NewError(ObjectMissing{})
}

opts := minio.StatObjectOptions{}
if sseKey != "" {
opts.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
opts.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString([]byte(sseKey)))
opts.Set("x-amz-server-side-encryption-customer-key-MD5", sumMD5Base64([]byte(sseKey)))
}
for objectStat := range c.listObjectWrapper(bucket, object, nonRecursive, nil) {
if objectStat.Err != nil {
return nil, probe.NewError(objectStat.Err)
@@ -929,7 +957,7 @@ func (c *s3Client) Stat(isIncomplete, isFetchMeta bool) (*clientContent, *probe.
objectMetadata.Metadata = map[string]string{}
objectMetadata.EncryptionHeaders = map[string]string{}
if isFetchMeta {
stat, err := c.getObjectStat(bucket, object)
stat, err := c.getObjectStat(bucket, object, opts)
if err != nil {
return nil, err
}
@@ -944,7 +972,7 @@ func (c *s3Client) Stat(isIncomplete, isFetchMeta bool) (*clientContent, *probe.
objectMetadata.Type = os.ModeDir

if isFetchMeta {
stat, err := c.getObjectStat(bucket, object)
stat, err := c.getObjectStat(bucket, object, opts)
if err != nil {
return nil, err
}
Expand All @@ -955,14 +983,13 @@ func (c *s3Client) Stat(isIncomplete, isFetchMeta bool) (*clientContent, *probe.
return objectMetadata, nil
}
}
return c.getObjectStat(bucket, object)
return c.getObjectStat(bucket, object, opts)
}

// getObjectStat returns the metadata of an object from a HEAD call.
func (c *s3Client) getObjectStat(bucket, object string) (*clientContent, *probe.Error) {
func (c *s3Client) getObjectStat(bucket, object string, opts minio.StatObjectOptions) (*clientContent, *probe.Error) {
objectMetadata := &clientContent{}

objectStat, e := c.api.StatObject(bucket, object, minio.StatObjectOptions{})
objectStat, e := c.api.StatObject(bucket, object, opts)
if e != nil {
errResponse := minio.ToErrorResponse(e)
if errResponse.Code == "AccessDenied" {
@@ -1342,7 +1369,7 @@ func (c *s3Client) listIncompleteRecursiveInRoutineDirOpt(contentCh chan *client
} else if strings.HasSuffix(object, string(c.targetURL.Separator)) {
// Get stat of given object is a directory.
isIncomplete := true
content, perr := c.Stat(isIncomplete, false)
content, perr := c.Stat(isIncomplete, false, "")
cContent = content
if perr != nil {
contentCh <- &clientContent{URL: *c.targetURL, Err: perr}
@@ -1496,7 +1523,7 @@ func (c *s3Client) listRecursiveInRoutineDirOpt(contentCh chan *clientContent, d
// Get stat of given object is a directory.
isIncomplete := false
isFetchMeta := false
content, perr := c.Stat(isIncomplete, isFetchMeta)
content, perr := c.Stat(isIncomplete, isFetchMeta, "")
cContent = content
if perr != nil {
contentCh <- &clientContent{URL: *c.targetURL, Err: perr}
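On the S3 side, the raw 32-byte key is turned into the three SSE-C request headers: the algorithm, the base64-encoded key, and the base64-encoded MD5 of the key, either via minio-go's NewSSEInfo helper (Get) or by setting the headers directly (Put, Stat). The standalone sketch below mirrors that logic; the endpoint, credentials, bucket, and object names are placeholders, and the calls assume the minio-go API generation this diff targets rather than any later release.

```go
// Standalone sketch of the SSE-C header handling used in the diff above.
// Endpoint, credentials, bucket, and object names are placeholders; the calls
// target the minio-go generation this change builds against.
package main

import (
	"crypto/md5"
	"encoding/base64"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	sseKey := "32byteslongsecretkeymustbegiven1" // must be exactly 32 bytes

	// GET: reuse minio-go's SSEInfo helper to produce the three SSE-C headers,
	// exactly as s3Client.Get does above.
	sseInfo := minio.NewSSEInfo([]byte(sseKey), "AES256")
	getOpts := minio.GetObjectOptions{}
	for k, v := range sseInfo.GetSSEHeaders() {
		getOpts.Set(k, v)
	}
	obj, err := client.GetObject("mybucket", "myobject", getOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	// HEAD/STAT: the same three headers set explicitly, as in s3Client.Stat and Put.
	keyMD5 := md5.Sum([]byte(sseKey))
	statOpts := minio.StatObjectOptions{}
	statOpts.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
	statOpts.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString([]byte(sseKey)))
	statOpts.Set("x-amz-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(keyMD5[:]))
	info, err := client.StatObject("mybucket", "myobject", statOpts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("size: %d bytes", info.Size)
}
```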
4 changes: 2 additions & 2 deletions cmd/client-s3_test.go
@@ -222,11 +222,11 @@ func (s *TestSuite) TestObjectOperations(c *C) {
reader = bytes.NewReader(object.data)
n, err := s3c.Put(context.Background(), reader, int64(len(object.data)), map[string]string{
"Content-Type": "application/octet-stream",
}, nil)
}, nil, "")
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(len(object.data)))

reader, err = s3c.Get()
reader, err = s3c.Get("")
c.Assert(err, IsNil)
var buffer bytes.Buffer
{
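The Copy change lets a server-side copy decrypt the source with one key and re-encrypt the destination with another by passing SSEInfo pointers into NewSourceInfo and NewDestinationInfo. The sketch below illustrates that flow end to end under the same hedges as before: placeholder endpoint, credentials, buckets, and keys, and the minio-go CopyObject signature of the era this diff targets.

```go
// Sketch of a server-side copy between SSE-C encrypted objects, mirroring the
// s3Client.Copy change above. All names and keys are placeholders.
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Distinct keys: the source object was encrypted under the first key, and
	// the copy will be re-encrypted under the second.
	srcKey := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
	dstKey := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")

	src := minio.NewSourceInfo("srcbucket", "srcobject", &srcKey)
	dst, err := minio.NewDestinationInfo("dstbucket", "dstobject", &dstKey, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The copy happens server side; the keys travel only as SSE-C request headers.
	if err := client.CopyObject(dst, src); err != nil {
		log.Fatal(err)
	}
}
```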
