diff --git a/src/server/pfs/server/obj_block_api_server.go b/src/server/pfs/server/obj_block_api_server.go
index a396b69cd4b..d8f4baca387 100644
--- a/src/server/pfs/server/obj_block_api_server.go
+++ b/src/server/pfs/server/obj_block_api_server.go
@@ -9,8 +9,8 @@ import (
 	"io/ioutil"
 	"time"

-	"go.pedge.io/lion/proto"
-	"go.pedge.io/proto/rpclog"
+	protolion "go.pedge.io/lion"
+	protorpclog "go.pedge.io/proto/rpclog"
 	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"

@@ -73,6 +73,14 @@ func newObjBlockAPIServer(dir string, cacheBytes int64, objClient obj.Client) (*
 	}, nil
 }

+func newMinioBlockAPIServer(dir string, cacheBytes int64) (*objBlockAPIServer, error) {
+	objClient, err := obj.NewMinioClientFromSecret("")
+	if err != nil {
+		return nil, err
+	}
+	return newObjBlockAPIServer(dir, cacheBytes, objClient)
+}
+
 func newAmazonBlockAPIServer(dir string, cacheBytes int64) (*objBlockAPIServer, error) {
 	objClient, err := obj.NewAmazonClientFromSecret("")
 	if err != nil {
diff --git a/src/server/pfs/server/server.go b/src/server/pfs/server/server.go
index 99c90b081d9..e2fdf787359 100644
--- a/src/server/pfs/server/server.go
+++ b/src/server/pfs/server/server.go
@@ -13,8 +13,9 @@ var (
 	MaxMsgSize = 3 * blockSize
 )

-// Valid backends
+// Valid object storage backends
 const (
+	MinioBackendEnvVar     = "MINIO"
 	AmazonBackendEnvVar    = "AMAZON"
 	GoogleBackendEnvVar    = "GOOGLE"
 	MicrosoftBackendEnvVar = "MICROSOFT"
@@ -40,10 +41,20 @@ func NewObjBlockAPIServer(dir string, cacheBytes int64, objClient obj.Client) (p
 	return newObjBlockAPIServer(dir, cacheBytes, objClient)
 }

-// NewBlockAPIServer creates a BlockAPIServer using the credentials it finds in
-// the environment
+// NewBlockAPIServer creates a BlockAPIServer using the credentials
+// it finds in the environment
 func NewBlockAPIServer(dir string, cacheBytes int64, backend string) (pfsclient.BlockAPIServer, error) {
 	switch backend {
+	case MinioBackendEnvVar:
+		// S3-compatible backends don't like leading slashes
+		if len(dir) > 0 && dir[0] == '/' {
+			dir = dir[1:]
+		}
+		blockAPIServer, err := newMinioBlockAPIServer(dir, cacheBytes)
+		if err != nil {
+			return nil, err
+		}
+		return blockAPIServer, nil
 	case AmazonBackendEnvVar:
 		// amazon doesn't like leading slashes
 		if len(dir) > 0 && dir[0] == '/' {
diff --git a/src/server/pkg/deploy/assets/assets.go b/src/server/pkg/deploy/assets/assets.go
index b96e0e166c3..45c64e6f401 100644
--- a/src/server/pkg/deploy/assets/assets.go
+++ b/src/server/pkg/deploy/assets/assets.go
@@ -31,6 +31,7 @@ var (
 	rethinkHeadlessName    = "rethink-headless" // headless service; give Rethink pods consistent DNS addresses
 	rethinkVolumeName      = "rethink-volume"
 	rethinkVolumeClaimName = "rethink-volume-claim"
+	minioSecretName        = "minio-secret"
 	amazonSecretName       = "amazon-secret"
 	googleSecretName       = "google-secret"
 	microsoftSecretName    = "microsoft-secret"
@@ -51,6 +52,7 @@ const (
 	amazonBackend
 	googleBackend
 	microsoftBackend
+	minioBackend
 )
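For context, here is a minimal sketch of how a caller might exercise the new `MINIO` case in `NewBlockAPIServer`. The `STORAGE_BACKEND` variable name and the hard-coded directory are illustrative assumptions, not values taken from this diff:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Assumption: the backend string arrives via an env var such as
	// STORAGE_BACKEND; this diff only defines the "MINIO" constant.
	backend := os.Getenv("STORAGE_BACKEND") // e.g. "MINIO"
	dir := "/pach/blocks"
	// Mirror the new switch case: S3-compatible stores reject keys
	// with a leading slash, so NewBlockAPIServer trims it first.
	if backend == "MINIO" && len(dir) > 0 && dir[0] == '/' {
		dir = dir[1:]
	}
	fmt.Printf("backend=%s prefix=%s\n", backend, dir) // backend=MINIO prefix=pach/blocks
}
```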
 // ServiceAccount returns a kubernetes service account for use with Pachyderm.
@@ -107,6 +109,23 @@ func PachdRc(shards uint64, backend backend, hostPath string, logLevel string, v
 		volumes[0].HostPath = &api.HostPathVolumeSource{
 			Path: filepath.Join(hostPath, "pachd"),
 		}
+	case minioBackend:
+		backendEnvVar = server.MinioBackendEnvVar
+		volumes[0].HostPath = &api.HostPathVolumeSource{
+			Path: filepath.Join(hostPath, "pachd"),
+		}
+		volumes = append(volumes, api.Volume{
+			Name: minioSecretName,
+			VolumeSource: api.VolumeSource{
+				Secret: &api.SecretVolumeSource{
+					SecretName: minioSecretName,
+				},
+			},
+		})
+		volumeMounts = append(volumeMounts, api.VolumeMount{
+			Name:      minioSecretName,
+			MountPath: "/" + minioSecretName,
+		})
 	case amazonBackend:
 		backendEnvVar = server.AmazonBackendEnvVar
 		volumes = append(volumes, api.Volume{
@@ -662,6 +681,36 @@ func InitJob(version string) *extensions.Job {
 	}
 }
+
+// MinioSecret creates a Minio secret with the following parameters:
+//   bucket   - S3 bucket name
+//   id       - S3 access key id
+//   secret   - S3 secret access key
+//   endpoint - S3 compatible endpoint
+//   secure   - set to true for a secure connection.
+func MinioSecret(bucket string, id string, secret string, endpoint string, secure bool) *api.Secret {
+	secureV := "0"
+	if secure {
+		secureV = "1"
+	}
+	return &api.Secret{
+		TypeMeta: unversioned.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: "v1",
+		},
+		ObjectMeta: api.ObjectMeta{
+			Name:   minioSecretName,
+			Labels: labels(minioSecretName),
+		},
+		Data: map[string][]byte{
+			"bucket":   []byte(bucket),
+			"id":       []byte(id),
+			"secret":   []byte(secret),
+			"endpoint": []byte(endpoint),
+			"secure":   []byte(secureV),
+		},
+	}
+}
+
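A small sketch of the key/value layout `MinioSecret` serializes; once Kubernetes mounts the secret, each key becomes a file under `/minio-secret/` that `NewMinioClientFromSecret` (later in this diff) reads back. The credential values below are placeholders:

```go
package main

import "fmt"

func main() {
	secure := true
	secureV := "0"
	if secure {
		secureV = "1" // booleans travel as "0"/"1" strings, matching MinioSecret
	}
	// Placeholder credentials; the keys mirror MinioSecret's Data map.
	data := map[string][]byte{
		"bucket":   []byte("pachyderm-pfs"),
		"id":       []byte("ACCESS-KEY-ID"),
		"secret":   []byte("SECRET-ACCESS-KEY"),
		"endpoint": []byte("minio.default.svc:9000"),
		"secure":   []byte(secureV),
	}
	for k, v := range data {
		fmt.Printf("/minio-secret/%s -> %s\n", k, v)
	}
}
```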
 // AmazonSecret creates an amazon secret with the following parameters:
 //   bucket - S3 bucket name
 //   id     - AWS access key id
@@ -730,7 +779,7 @@ func MicrosoftSecret(container string, id string, secret string) *api.Secret {
 // WriteRethinkVolumes creates 'shards' persistent volumes, either backed by IAAS persistent volumes (EBS volumes for amazon, GCP volumes for Google, etc)
 // or local volumes (if 'backend' == 'local'). All volumes are created with size 'size'.
 func WriteRethinkVolumes(w io.Writer, backend backend, shards int, hostPath string, names []string, size int) error {
-	if backend != localBackend && len(names) < shards {
+	if backend != localBackend && backend != minioBackend && len(names) < shards {
 		return fmt.Errorf("could not create non-local rethink cluster with %d shards, as there are only %d external volumes", shards, len(names))
 	}
 	encoder := codec.NewEncoder(w, jsonEncoderHandle)
@@ -779,6 +828,8 @@ func WriteRethinkVolumes(w io.Writer, backend backend, shards int, hostPath stri
 				DataDiskURI: dataDiskURI,
 			},
 		}
+	case minioBackend:
+		fallthrough
 	case localBackend:
 		spec.Spec.PersistentVolumeSource = api.PersistentVolumeSource{
 			HostPath: &api.HostPathVolumeSource{
@@ -860,12 +911,12 @@ func WriteAssets(w io.Writer, opts *AssetOpts, backend backend,
 		fmt.Fprintf(w, "\n")
 		RethinkHeadlessService().CodecEncodeSelf(encoder)
 	} else {
-		if backend != localBackend && len(volumeNames) != 1 {
+		if backend != localBackend && backend != minioBackend && len(volumeNames) != 1 {
 			return fmt.Errorf("RethinkDB can only be managed by a ReplicationController as a single instance, but received %d volumes", len(volumeNames))
 		}
 		RethinkVolumeClaim(volumeSize).CodecEncodeSelf(encoder)
 		volumeName := ""
-		if backend != localBackend {
+		if backend != localBackend && backend != minioBackend {
 			volumeName = volumeNames[0]
 		}
 		RethinkRc(volumeName, opts.RethinkdbCacheSize).CodecEncodeSelf(encoder)
@@ -887,6 +938,18 @@ func WriteLocalAssets(w io.Writer, opts *AssetOpts, hostPath string) error {
 	return WriteAssets(w, opts, localBackend, nil, 1 /* = volume size (gb) */, hostPath)
 }
+
+// WriteMinioAssets writes assets for an S3-compatible (Minio) backend.
+func WriteMinioAssets(w io.Writer, opts *AssetOpts, hostPath string, bucket string, id string,
+	secret string, endpoint string, secure bool) error {
+	if err := WriteAssets(w, opts, minioBackend, nil, 1 /* = volume size (gb) */, hostPath); err != nil {
+		return err
+	}
+	encoder := codec.NewEncoder(w, jsonEncoderHandle)
+	MinioSecret(bucket, id, secret, endpoint, secure).CodecEncodeSelf(encoder)
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
 // WriteAmazonAssets writes assets to an amazon backend.
 func WriteAmazonAssets(w io.Writer, opts *AssetOpts, bucket string, id string, secret string,
 	token string, region string, volumeNames []string, volumeSize int) error {
diff --git a/src/server/pkg/deploy/cmds/cmds.go b/src/server/pkg/deploy/cmds/cmds.go
index 7c35875cf4e..411947ecebc 100644
--- a/src/server/pkg/deploy/cmds/cmds.go
+++ b/src/server/pkg/deploy/cmds/cmds.go
@@ -17,6 +17,7 @@ import (
 	_metrics "github.com/pachyderm/pachyderm/src/server/pkg/metrics"
 	"github.com/spf13/cobra"
+	"go.pedge.io/pkg/cobra"
 )

 func maybeKcCreate(dryRun bool, manifest *bytes.Buffer) error {
@@ -40,6 +41,7 @@ func DeployCmd(noMetrics *bool) *cobra.Command {
 	var hostPath string
 	var dev bool
 	var dryRun bool
+	var secure bool
 	var deployRethinkAsRc bool
 	var deployRethinkAsStatefulSet bool
 	var rethinkdbCacheSize string
@@ -94,6 +96,28 @@ func DeployCmd(noMetrics *bool) *cobra.Command {
 		}),
 	}

+	deployMinio := &cobra.Command{
+		Use:   "minio [-s] <bucket> <id> <secret> <endpoint>",
+		Short: "Deploy a Pachyderm cluster backed by a Minio (S3-compatible) object store.",
+		Long: "Deploy a Pachyderm cluster backed by a Minio (S3-compatible) object store. Arguments are:\n" +
+			"  <bucket>: A Minio bucket where Pachyderm will store PFS data.\n" +
+			"  <id>, <secret>: Access credentials.\n" +
+			"  <endpoint>: The S3-compatible endpoint to connect to.\n",
+		Run: pkgcobra.RunBoundedArgs(pkgcobra.Bounds{Min: 4, Max: 4}, func(args []string) (retErr error) {
+			if metrics && !dev {
+				metricsFn := _metrics.ReportAndFlushUserAction("Deploy")
+				defer func(start time.Time) { metricsFn(start, retErr) }(time.Now())
+			}
+			manifest := &bytes.Buffer{}
+			err := assets.WriteMinioAssets(manifest, opts, hostPath, args[0], args[1], args[2], args[3], secure)
+			if err != nil {
+				return err
+			}
+			return maybeKcCreate(dryRun, manifest)
+		}),
+	}
+	deployMinio.Flags().BoolVarP(&secure, "secure", "s", false, "Enable secure access to Minio server.")
+
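A hedged sketch of what the new deploy command does under the hood: serialize the cluster manifests plus a Minio secret into one stream. Using the zero-value `AssetOpts` and the placeholder paths/credentials here is an assumption for illustration, not how `pachctl` configures them:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pachyderm/pachyderm/src/server/pkg/deploy/assets"
)

func main() {
	manifest := &bytes.Buffer{}
	opts := &assets.AssetOpts{} // assumption: defaults suffice for a dry-run sketch
	// Mirrors deployMinio's Run: the four args are bucket, id, secret, endpoint.
	err := assets.WriteMinioAssets(manifest, opts, "/var/pachyderm", "pachyderm-pfs",
		"ACCESS-KEY-ID", "SECRET-ACCESS-KEY", "minio.default.svc:9000", false /* secure */)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest.String()) // JSON manifests, as with --dry-run
}
```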
Arguments are:\n" + + " : A Minio bucket where Pachyderm will store PFS data.\n" + + " , , : Access credentials.\n" + + " : Secure connection.\n", + Run: pkgcobra.RunBoundedArgs(pkgcobra.Bounds{Min: 4, Max: 4}, func(args []string) (retErr error) { + if metrics && !dev { + metricsFn := _metrics.ReportAndFlushUserAction("Deploy") + defer func(start time.Time) { metricsFn(start, retErr) }(time.Now()) + } + manifest := &bytes.Buffer{} + err := assets.WriteMinioAssets(manifest, opts, hostPath, args[0], args[1], args[2], args[3], secure) + if err != nil { + return err + } + return maybeKcCreate(dryRun, manifest) + }), + } + deployMinio.Flags().BoolVarP(&secure, "secure", "s", false, "Enable secure access to Minio server.") + deployAmazon := &cobra.Command{ Use: "amazon ", Short: "Deploy a Pachyderm cluster running on AWS.", @@ -206,6 +230,7 @@ func DeployCmd(noMetrics *bool) *cobra.Command { deploy.AddCommand(deployAmazon) deploy.AddCommand(deployGoogle) deploy.AddCommand(deployMicrosoft) + deploy.AddCommand(deployMinio) return deploy } diff --git a/src/server/pkg/obj/minio_client.go b/src/server/pkg/obj/minio_client.go new file mode 100644 index 00000000000..06d9917a0b7 --- /dev/null +++ b/src/server/pkg/obj/minio_client.go @@ -0,0 +1,107 @@ +package obj + +import ( + "io" + + minio "github.com/minio/minio-go" +) + +// Represents minio client instance for any s3 compatible server. +type minioClient struct { + *minio.Client + bucket string +} + +func newMinioClient(endpoint, bucket, id, secret string, secure bool) (*minioClient, error) { + mclient, err := minio.New(endpoint, id, secret, secure) + if err != nil { + return nil, err + } + return &minioClient{ + bucket: bucket, + Client: mclient, + }, nil +} + +func (c *minioClient) Writer(name string) (io.WriteCloser, error) { + reader, writer := io.Pipe() + go func(reader *io.PipeReader) { + _, err := c.PutObject(c.bucket, name, reader, "application/octet-stream") + if err != nil { + reader.CloseWithError(err) + return + } + }(reader) + return writer, nil +} + +func (c *minioClient) Walk(name string, fn func(name string) error) error { + recursive := true // Recursively walk by default. + + doneCh := make(chan struct{}) + defer close(doneCh) + for objInfo := range c.ListObjectsV2(c.bucket, name, recursive, doneCh) { + if objInfo.Err != nil { + return objInfo.Err + } + if err := fn(objInfo.Key); err != nil { + return err + } + } + return nil +} + +// sectionReadCloser implements a closer compatible wrapper +// over *io.SectionReader. +type sectionReadCloser struct { + *io.SectionReader + mObj *minio.Object +} + +func (s *sectionReadCloser) Close() (err error) { + return s.mObj.Close() +} + +func (c *minioClient) Reader(name string, offset uint64, size uint64) (io.ReadCloser, error) { + obj, err := c.GetObject(c.bucket, name) + if err != nil { + return nil, err + } + sectionReader := io.NewSectionReader(obj, int64(offset), int64(size)) + return §ionReadCloser{ + SectionReader: sectionReader, + mObj: obj, + }, nil +} + +func (c *minioClient) Delete(name string) error { + return c.RemoveObject(c.bucket, name) +} + +func (c *minioClient) Exists(name string) bool { + _, err := c.StatObject(c.bucket, name) + return err == nil +} + +func (c *minioClient) IsRetryable(err error) bool { + // Minio client already implements retrying, no + // need for a caller retry. + return false +} + +func (c *minioClient) IsIgnorable(err error) bool { + return false +} + +// Sentinel error response returned if err is not +// of type *minio.ErrorResponse. 
+
+// Sentinel error response returned if err is not
+// of type *minio.ErrorResponse.
+var sentinelErrResp = minio.ErrorResponse{}
+
+func (c *minioClient) IsNotExist(err error) bool {
+	errResp := minio.ToErrorResponse(err)
+	if errResp == sentinelErrResp {
+		return false
+	}
+	// Treat both object not found and bucket not found as IsNotExist().
+	return errResp.Code == "NoSuchKey" || errResp.Code == "NoSuchBucket"
+}
diff --git a/src/server/pkg/obj/obj.go b/src/server/pkg/obj/obj.go
index b7468353895..31bd5e7a5f4 100644
--- a/src/server/pkg/obj/obj.go
+++ b/src/server/pkg/obj/obj.go
@@ -88,6 +88,16 @@ func NewMicrosoftClientFromSecret(container string) (Client, error) {
 	return NewMicrosoftClient(container, string(id), string(secret))
 }
+
+// NewMinioClient creates an S3-compatible client with the following credentials:
+//   endpoint - S3 compatible endpoint
+//   bucket   - S3 bucket name
+//   id       - S3 access key id
+//   secret   - S3 secret access key
+//   secure   - Set to true if connection is secure.
+func NewMinioClient(endpoint, bucket, id, secret string, secure bool) (Client, error) {
+	return newMinioClient(endpoint, bucket, id, secret, secure)
+}
+
 // NewAmazonClient creates an amazon client with the following credentials:
 //   bucket - S3 bucket name
 //   id     - AWS access key id
@@ -99,6 +109,36 @@ func NewAmazonClient(bucket string, id string, secret string, token string,
 	return newAmazonClient(bucket, id, secret, token, region)
 }
+
+// NewMinioClientFromSecret constructs an S3-compatible client by reading
+// credentials from a mounted MinioSecret. You may pass "" for bucket, in which
+// case it will read the bucket from the secret.
+func NewMinioClientFromSecret(bucket string) (Client, error) {
+	if bucket == "" {
+		_bucket, err := ioutil.ReadFile("/minio-secret/bucket")
+		if err != nil {
+			return nil, err
+		}
+		bucket = string(_bucket)
+	}
+	endpoint, err := ioutil.ReadFile("/minio-secret/endpoint")
+	if err != nil {
+		return nil, err
+	}
+	id, err := ioutil.ReadFile("/minio-secret/id")
+	if err != nil {
+		return nil, err
+	}
+	secret, err := ioutil.ReadFile("/minio-secret/secret")
+	if err != nil {
+		return nil, err
+	}
+	secure, err := ioutil.ReadFile("/minio-secret/secure")
+	if err != nil {
+		return nil, err
+	}
+	return NewMinioClient(string(endpoint), bucket, string(id), string(secret), string(secure) == "1")
+}
+
 // NewAmazonClientFromSecret constructs an amazon client by reading credentials
 // from a mounted AmazonSecret. You may pass "" for bucket in which case it
 // will read the bucket from the secret.
diff --git a/src/server/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/src/server/vendor/github.com/minio/minio-go/CONTRIBUTING.md
new file mode 100644
index 00000000000..8b1ee86c6d0
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+    - Fork it
+    - Create your feature branch (git checkout -b my-new-feature)
+    - Commit your changes (git commit -am 'Add some feature')
+    - Push to the branch (git push origin my-new-feature)
+    - Create new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+    - Run `go fmt`
+    - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+ - Make sure `go test -race ./...` and `go build` completes. + NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables + ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...`` + +* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/src/server/vendor/github.com/minio/minio-go/LICENSE b/src/server/vendor/github.com/minio/minio-go/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/server/vendor/github.com/minio/minio-go/MAINTAINERS.md b/src/server/vendor/github.com/minio/minio-go/MAINTAINERS.md new file mode 100644 index 00000000000..6dbef6265fb --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/MAINTAINERS.md @@ -0,0 +1,19 @@ +# For maintainers only + +## Responsibilities + +Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) + +### Making new releases + +Edit `libraryVersion` constant in `api.go`. + +``` +$ grep libraryVersion api.go + libraryVersion = "0.3.0" +``` + +``` +$ git tag 0.3.0 +$ git push --tags +``` \ No newline at end of file diff --git a/src/server/vendor/github.com/minio/minio-go/README.md b/src/server/vendor/github.com/minio/minio-go/README.md new file mode 100644 index 00000000000..f0d880b1e2c --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/README.md @@ -0,0 +1,256 @@ +# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. + +**Supported cloud storage providers:** + +- AWS Signature Version 4 + - Amazon S3 + - Minio + + +- AWS Signature Version 2 + - Google Cloud Storage (Compatibility Mode) + - Openstack Swift + Swift3 middleware + - Ceph Object Gateway + - Riak CS + +This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference). + +This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). + + +## Download from Github + +```sh + +go get -u github.com/minio/minio-go + +``` +## Initialize Minio Client + +Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. + + +| Parameter | Description| +| :--- | :--- | +| endpoint | URL to object storage service. | +| accessKeyID | Access key is the user ID that uniquely identifies your account. 
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
+
+
+```go
+
+package main
+
+import (
+	"github.com/minio/minio-go"
+	"log"
+)
+
+func main() {
+	endpoint := "play.minio.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%v", minioClient) // minioClient is now setup
+}
+```
+
+## Quick Start Example - File Uploader
+
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
+
+We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+#### FileUploader.go
+
+```go
+package main
+
+import (
+	"github.com/minio/minio-go"
+	"log"
+)
+
+func main() {
+	endpoint := "play.minio.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a new bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(bucketName, location)
+	if err != nil {
+		// Check to see if we already own this bucket (which happens if you run this twice)
+		exists, err := minioClient.BucketExists(bucketName)
+		if err == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	}
+	log.Printf("Successfully created %s\n", bucketName)
+
+	// Upload the zip file
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject
+	n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+#### Run FileUploader
+
+```sh
+
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+
+```
+
+## API Reference
+
+The full API Reference is available here:
+
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API Reference : Bucket notification Operations
+
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+
+### API Reference : Object Operations
+
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference : Presigned Operations
+
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+
+## Full Examples
+
+#### Full Examples : Bucket Operations
+
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* 
[bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +#### Full Examples : Bucket policy Operations + +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +#### Full Examples : Bucket notification Operations + +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension) + +#### Full Examples : File Object Operations + +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) + +#### Full Examples : Object Operations + +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) + +#### Full Examples : Presigned Operations +* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) +* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) +* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) + +## Explore Further +* [Complete Documentation](https://docs.minio.io) +* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) +* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app) + +## Contribute + +[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) + +[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) +[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) + diff --git a/src/server/vendor/github.com/minio/minio-go/api-datatypes.go b/src/server/vendor/github.com/minio/minio-go/api-datatypes.go new file mode 100644 index 00000000000..ab2aa4af2c8 --- 
/dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-datatypes.go @@ -0,0 +1,83 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "time" +) + +// BucketInfo container for bucket metadata. +type BucketInfo struct { + // The name of the bucket. + Name string `json:"name"` + // Date the bucket was created. + CreationDate time.Time `json:"creationDate"` +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. + Metadata http.Header `json:"metadata"` + + // Owner name. + Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` + } `json:"owner"` + + // The class of storage used to store the object. + StorageClass string `json:"storageClass"` + + // Error + Err error `json:"-"` +} + +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + + // Size in bytes of the object. + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-error-response.go b/src/server/vendor/github.com/minio/minio-go/api-error-response.go new file mode 100644 index 00000000000..fee3c7d53a9 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-error-response.go @@ -0,0 +1,245 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"strconv"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+   <?xml version="1.0" encoding="UTF-8"?>
+   <Error>
+      <Code>AccessDenied</Code>
+      <Message>Access Denied</Message>
+      <BucketName>bucketName</BucketName>
+      <Key>objectName</Key>
+      <RequestId>F19772218238A85A</RequestId>
+      <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+   </Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+type ErrorResponse struct {
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
+
+	// Region where the bucket is located. This header is returned
+	// only in HEAD bucket and ListObjects response.
+	Region string
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+//   import s3 "github.com/minio/minio-go"
+//   ...
+//   ...
+//   reader, stat, err := s3.GetObject(...)
+//   if err != nil {
+//      resp := s3.ToErrorResponse(err)
+//   }
+//   ...
+func ToErrorResponse(err error) ErrorResponse {
+	switch err := err.(type) {
+	case ErrorResponse:
+		return err
+	default:
+		return ErrorResponse{}
+	}
+}
+
+// Error - Returns HTTP error string
+func (e ErrorResponse) Error() string {
+	return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return ErrInvalidArgument(msg)
+	}
+	var errResp ErrorResponse
+	err := xmlDecoder(resp.Body, &errResp)
+	// XML decoding failed with no body, fall back to HTTP headers.
+ if err != nil { + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errResp = ErrorResponse{ + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } else { + errResp = ErrorResponse{ + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + case http.StatusForbidden: + errResp = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + case http.StatusConflict: + errResp = ErrorResponse{ + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + errResp = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + } + return errResp +} + +// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func ErrTransferAccelerationBucket(bucketName string) error { + msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").") + return ErrorResponse{ + Code: "InvalidArgument", + Message: msg, + BucketName: bucketName, + } +} + +// ErrEntityTooLarge - Input size is larger than supported maximum. +func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrEntityTooSmall - Input size is smaller than supported minimum. +func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize) + return ErrorResponse{ + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedEOF - Unexpected end of file reached. +func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.", + strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) + return ErrorResponse{ + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrInvalidBucketName - Invalid bucket name response. +func ErrInvalidBucketName(message string) error { + return ErrorResponse{ + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectName - Invalid object name response. 
+func ErrInvalidObjectName(message string) error { + return ErrorResponse{ + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectPrefix - Invalid object prefix response is +// similar to object name response. +var ErrInvalidObjectPrefix = ErrInvalidObjectName + +// ErrInvalidArgument - Invalid argument response. +func ErrInvalidArgument(message string) error { + return ErrorResponse{ + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// ErrNoSuchBucketPolicy - No Such Bucket Policy response +// The specified bucket does not have a bucket policy. +func ErrNoSuchBucketPolicy(message string) error { + return ErrorResponse{ + Code: "NoSuchBucketPolicy", + Message: message, + RequestID: "minio", + } +} + +// ErrAPINotSupported - API not supported response +// The specified API call is not supported +func ErrAPINotSupported(message string) error { + return ErrorResponse{ + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-get-object-file.go b/src/server/vendor/github.com/minio/minio-go/api-get-object-file.go new file mode 100644 index 00000000000..a38fc852a7c --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -0,0 +1,104 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io" + "os" + "path/filepath" +) + +// FGetObject - download contents of an object to a local file. +func (c Client) FGetObject(bucketName, objectName, filePath string) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. + st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. + if st.IsDir() { + return ErrInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(bucketName, objectName) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filePath + objectStat.ETag + ".part.minio" + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return err + } + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Seek to current position for incoming reader. 
+ objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-get-object.go b/src/server/vendor/github.com/minio/minio-go/api-get-object.go new file mode 100644 index 00000000000..c9b4dceddf4 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-get-object.go @@ -0,0 +1,643 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" +) + +// GetObject - returns an seekable, readable object. +func (c Client) GetObject(bucketName, objectName string) (*Object, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, err + } + + var httpReader io.ReadCloser + var objectInfo ObjectInfo + var err error + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer))) + } else { + // First request is a Read request. 
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0) + } + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: int(size), + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + objectInfo, err = c.StatObject(bucketName, objectName) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + objectInfo, err := c.StatObject(bucketName, objectName) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer))) + } else { + httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0) + } + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Reply back how much was read. + resCh <- getResponse{ + Size: int(size), + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(reqCh, resCh, doneCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. 
+type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements Read, ReadAt, +// Seeker, Close for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + doneCh chan<- struct{} + prevOffset int64 + currOffset int64 + objectInfo ObjectInfo + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + // Save the current offset as previous offset. + o.prevOffset = o.currOffset + + if o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Verify if offset has changed and currOffset is greater than + // previous offset. Perhaps due to Seek(). + offsetChange := o.prevOffset - o.currOffset + if offsetChange < 0 { + offsetChange = -offsetChange + } + if offsetChange > 0 { + // Fetch the new reader at the current offset again. + readReq.Offset = o.currOffset + readReq.DidOffsetChange = true + } else { + // No offset changes no need to fetch new reader, continue + // reading. + readReq.DidOffsetChange = false + readReq.Offset = 0 + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. 
+ o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + statReq := getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + } + + // Send the request and get the response. + _, err := o.doGetRequest(statReq) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is error which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Can only compare offsets to size when size has been set. + if o.objectInfoSet { + // If offset is negative than we return io.EOF. + // If offset is greater than or equal to object size we return io.EOF. + if offset >= o.objectInfo.Size || offset < 0 { + return 0, io.EOF + } + } + + // Create the new readAt request. + readAtReq := getRequest{ + isReadOp: true, + isReadAt: true, + DidOffsetChange: true, // Offset always changes. + beenRead: o.beenRead, // Set if this is the first request to try and read. + Offset: offset, // Set the offset. + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readAtReq.isFirstReq = true + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readAtReq) + if err != nil && err != io.EOF { + // Save the error. + o.prevErr = err + return response.Size, err + } + // Bytes read. + bytesRead := int64(response.Size) + // There is no valid objectInfo yet + // to compare against for EOF. + if !o.objectInfoSet { + // Update the currentOffset. + o.currOffset += bytesRead + // Save the current offset as previous offset. + o.prevOffset = o.currOffset + } else { + // If this was not the first request update + // the offsets and compare against objectInfo + // for EOF. + oerr := o.setOffset(bytesRead) + if oerr != nil { + o.prevErr = oerr + return response.Size, oerr + } + } + return response.Size, err +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. 
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil { + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != io.EOF { + return 0, o.prevErr + } + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + // Save current offset as previous offset. + o.prevOffset = o.currOffset + + // Switch through whence. + switch whence { + default: + return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) { + // Validate input arguments. 
+ if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, err + } + + customHeader := make(http.Header) + // Set ranges if length and offset are valid. + // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. + if length > 0 && offset >= 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else if offset > 0 && length == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length < 0 && offset == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d", length)) + } + + // Execute GET on objectName. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + }) + if err != nil { + return nil, ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. + md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. " + reportIssue + return nil, ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + // Get content-type. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + var objectStat ObjectInfo + objectStat.ETag = md5sum + objectStat.Key = objectName + objectStat.Size = resp.ContentLength + objectStat.LastModified = date + objectStat.ContentType = contentType + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-get-policy.go b/src/server/vendor/github.com/minio/minio-go/api-get-policy.go new file mode 100644 index 00000000000..da0a409cd93 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-get-policy.go @@ -0,0 +1,95 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/pkg/policy" +) + +// GetBucketPolicy - get bucket policy at a given path. +func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) { + // Input validation. 
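The caller's view of the policy accessors, assuming a Client c built as in the earlier sketches and hypothetical bucket and prefix names; GetBucketPolicy condenses the raw policy statements into a single policy.BucketPolicy value:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        // One of "none", "readonly", "readwrite" or "writeonly".
        pol, err := c.GetBucketPolicy("mybucket", "uploads")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("policy on mybucket/uploads:", pol)
    }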
+ if err := isValidBucketName(bucketName); err != nil { + return policy.BucketPolicyNone, err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return policy.BucketPolicyNone, err + } + policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) + if err != nil { + return policy.BucketPolicyNone, err + } + return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil +} + +// ListBucketPolicies - list all policies for a given prefix and all its children. +func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return map[string]policy.BucketPolicy{}, err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return map[string]policy.BucketPolicy{}, err + } + policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) + if err != nil { + return map[string]policy.BucketPolicy{}, err + } + return policy.GetPolicies(policyInfo.Statements, bucketName), nil +} + +// Request server for current bucket policy. +func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return policy.BucketAccessPolicy{}, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" { + return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil + } + return policy.BucketAccessPolicy{}, errResponse + } + } + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return policy.BucketAccessPolicy{}, err + } + + policy := policy.BucketAccessPolicy{} + err = json.Unmarshal(bucketPolicyBuf, &policy) + return policy, err +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-list.go b/src/server/vendor/github.com/minio/minio-go/api-list.go new file mode 100644 index 00000000000..adfaa0a7aa6 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-list.go @@ -0,0 +1,699 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) 
+// for message := range api.ListBuckets() { +// fmt.Println(message) +// } +// +func (c Client) ListBuckets() ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod("GET", requestMetadata{}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +/// Bucket Read Operations. + +// ListObjectsV2 lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Return object owner information by default + fetchOwner := true + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. 
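The doneCh parameter is what lets a consumer abandon a listing early: closing it tears down the goroutine above even if the bucket holds millions of keys. A sketch with hypothetical names that stops after the first ten results:

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        // The deferred close of doneCh stops the listing goroutine
        // as soon as main returns.
        doneCh := make(chan struct{})
        defer close(doneCh)
        n := 0
        for obj := range c.ListObjectsV2("mybucket", "logs/", true, doneCh) {
            if obj.Err != nil {
                log.Fatal(obj.Err)
            }
            fmt.Println(obj.Key, obj.Size)
            if n++; n == 10 {
                break
            }
        }
    }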
+ if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?continuation-token - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) { + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + return listBucketV2Result{}, err + } + // Validate object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + return listBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + // Set object prefix. + if objectPrefix != "" { + urlValues.Set("prefix", objectPrefix) + } + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + // Set delimiter. + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return listBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := listBucketV2Result{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + return listBucketResult, nil +} + +// ListObjects - (List Objects) - List some objects or all recursively. +// +// ListObjects lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. 
+ objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) { + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + return listBucketResult{}, err + } + // Validate object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + return listBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + // Set object prefix. + if objectPrefix != "" { + urlValues.Set("prefix", objectPrefix) + } + // Set object marker. + if objectMarker != "" { + urlValues.Set("marker", objectMarker) + } + // Set delimiter. + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Execute GET on bucket to list objects. 
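For contrast with the V2 continuation token, the V1 query assembled below paginates with a marker, the key after which listing resumes. A stdlib-only sketch of one page's query string, with hypothetical values:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // The query for one V1 list page: the marker is the last key of
        // the previous page (or NextMarker when the server provides one).
        v := make(url.Values)
        v.Set("prefix", "logs/")
        v.Set("marker", "logs/2016-08-31.txt")
        v.Set("delimiter", "/")
        v.Set("max-keys", "1000")
        fmt.Println(v.Encode())
        // delimiter=%2F&marker=logs%2F2016-08-31.txt&max-keys=1000&prefix=logs%2F
    }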
+ resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return listBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. + listBucketResult := listBucketResult{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + return listBucketResult, nil +} + +// ListIncompleteUploads - List incompletely uploaded multipart objects. +// +// ListIncompleteUploads lists all incompleted objects matching the +// objectPrefix from the specified bucket. If recursion is enabled +// it would list all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel to pro-actively close the internal go routine. +// If you enable recursive as 'true' this function will return back all +// the multipart objects in a given bucket name. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } +// +func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { + // Turn on size aggregation of individual parts. + isAggregateSize := true + return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) +} + +// listIncompleteUploads lists all incomplete uploads. +func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { + // Allocate channel for multipart uploads. + objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) + // Delimiter is set to "/" by default. + delimiter := "/" + if recursive { + // If recursive do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { + defer close(objectMultipartStatCh) + // object and upload ID marker for future requests. + var objectMarker string + var uploadIDMarker string + for { + // list all multipart uploads. + result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) + if err != nil { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return + } + // Save objectMarker and uploadIDMarker for next request. + objectMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + // Send all multipart uploads. + for _, obj := range result.Uploads { + // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. + if aggregateSize { + // Get total multipart size. 
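A usage sketch for the incomplete-uploads listing, with hypothetical names; since ListIncompleteUploads enables size aggregation, each result's Size is the sum of the parts uploaded so far (the getTotalMultipartSize step just below):

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        doneCh := make(chan struct{})
        defer close(doneCh)
        for upload := range c.ListIncompleteUploads("mybucket", "", true, doneCh) {
            if upload.Err != nil {
                log.Fatal(upload.Err)
            }
            fmt.Printf("%s (upload id %s): %d bytes uploaded so far\n",
                upload.Key, upload.UploadID, upload.Size)
        }
    }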
+ obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) + if err != nil { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + continue + } + } + select { + // Send individual uploads here. + case objectMultipartStatCh <- obj: + // If done channel return here. + case <-doneCh: + return + } + } + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectMultipartInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send delimited prefixes here. + case objectMultipartStatCh <- object: + // If done channel return here. + case <-doneCh: + return + } + } + // Listing ends if result not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectMultipartStatCh) + // return. + return objectMultipartStatCh +} + +// listMultipartUploads - (List Multipart Uploads). +// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request parameters. :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin. +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + // Set upload id marker. + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } + // Set prefix marker. + if prefix != "" { + urlValues.Set("prefix", prefix) + } + // Set delimiter. + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // maxUploads should be 1000 or less. + if maxUploads == 0 || maxUploads > 1000 { + maxUploads = 1000 + } + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + + // Execute GET on bucketName to list multipart uploads. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return listMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. + listMultipartUploadsResult := listMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) { + // Part number marker for the next batch of request. 
+ var nextPartNumberMarker int + partsInfo = make(map[int]objectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. + listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + // Trim off the odd double quotes from ETag in the beginning and end. + part.ETag = strings.TrimPrefix(part.ETag, "\"") + part.ETag = strings.TrimSuffix(part.ETag, "\"") + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. + nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name. +func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) { + // Make list incomplete uploads recursive. + isRecursive := true + // Turn off size aggregation of individual parts, in this request. + isAggregateSize := false + // latestUpload to track the latest multipart info for objectName. + var latestUpload ObjectMultipartInfo + // Create done channel to cleanup the routine. + doneCh := make(chan struct{}) + defer close(doneCh) + // List all incomplete uploads. + for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { + if mpUpload.Err != nil { + return "", mpUpload.Err + } + if objectName == mpUpload.Key { + if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 { + latestUpload = mpUpload + } + } + } + // Return the latest upload id. + return latestUpload.UploadID, nil +} + +// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. +func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) { + // Iterate over all parts and aggregate the size. + partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + for _, partInfo := range partsInfo { + size += partInfo.Size + } + return size, nil +} + +// listObjectPartsQuery (List Parts query) +// - lists some or all (up to 1000) parts that have been uploaded +// for a specific multipart upload +// +// You can use the request parameters as selection criteria to return +// a subset of the uploads in a bucket, request parameters :- +// --------- +// ?part-number-marker - Specifies the part after which listing should +// begin. +// ?max-parts - Maximum parts to be listed per request. +func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // maxParts should be 1000 or less. + if maxParts == 0 || maxParts > 1000 { + maxParts = 1000 + } + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + + // Execute GET on objectName to get list of parts. 
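findUploadID's selection rule deserves a note: when several incomplete uploads exist for the same key, the most recently initiated one is resumed. A small self-contained sketch of that rule, using a hypothetical upload type:

    package main

    import (
        "fmt"
        "time"
    )

    type upload struct {
        ID        string
        Initiated time.Time
    }

    // latestUpload mirrors findUploadID's comparison: keep whichever
    // candidate was initiated most recently.
    func latestUpload(uploads []upload) string {
        var latest upload
        for _, u := range uploads {
            if u.Initiated.Sub(latest.Initiated) > 0 {
                latest = u
            }
        }
        return latest.ID
    }

    func main() {
        now := time.Now()
        fmt.Println(latestUpload([]upload{
            {"old-attempt", now.Add(-2 * time.Hour)},
            {"new-attempt", now.Add(-5 * time.Minute)},
        })) // new-attempt
    }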
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return listObjectPartsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode list object parts XML.
+ listObjectPartsResult := listObjectPartsResult{}
+ err = xmlDecoder(resp.Body, &listObjectPartsResult)
+ if err != nil {
+ return listObjectPartsResult, err
+ }
+ return listObjectPartsResult, nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-notification.go b/src/server/vendor/github.com/minio/minio-go/api-notification.go
new file mode 100644
index 00000000000..9c2a2ebd230
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-notification.go
@@ -0,0 +1,214 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return BucketNotification{}, err
+ }
+ notification, err := c.getBucketNotification(bucketName)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to fetch the notification configuration.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+}
+
+// processBucketNotificationResponse processes the GetBucketNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return BucketNotification{}, errResponse
+ }
+ var bucketNotification BucketNotification
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return bucketNotification, nil
+}
+
+// identity represents the user id; this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// Notification event server specific metadata. +type eventMeta struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket bucketMeta `json:"bucket"` + Object objectMeta `json:"object"` +} + +// NotificationEvent represents an Amazon an S3 bucket notification event. +type NotificationEvent struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 eventMeta `json:"s3"` +} + +// NotificationInfo - represents the collection of notification events, additionally +// also reports errors if any while listening on bucket notifications. +type NotificationInfo struct { + Records []NotificationEvent + Err error +} + +// ListenBucketNotification - listen on bucket notifications. +func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo { + notificationInfoCh := make(chan NotificationInfo, 1) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- NotificationInfo) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if err := isValidBucketName(bucketName); err != nil { + notificationInfoCh <- NotificationInfo{ + Err: err, + } + return + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) { + notificationInfoCh <- NotificationInfo{ + Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"), + } + return + } + + // Continously run and listen on bucket notification. + // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + urlValues := make(url.Values) + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + if err != nil { + continue + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + notificationInfoCh <- NotificationInfo{ + Err: errResponse, + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Close the response body. + defer resp.Body.Close() + + // Unmarshal each line, returns marshalled values. + for bio.Scan() { + var notificationInfo NotificationInfo + if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { + continue + } + // Send notifications on channel only if there are events received. 
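A consumer-side sketch of ListenBucketNotification, assuming a hypothetical endpoint, bucket, and filter values; event names follow the S3 notification convention (for example s3:ObjectCreated:*):

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        doneCh := make(chan struct{})
        defer close(doneCh)
        // Receive object-created events under "photos/" ending in ".jpg".
        events := []string{"s3:ObjectCreated:*"}
        for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
            if info.Err != nil {
                log.Fatal(info.Err)
            }
            for _, record := range info.Records {
                fmt.Println(record.EventName, record.S3.Object.Key)
            }
        }
    }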
+ if len(notificationInfo.Records) > 0 { + select { + case notificationInfoCh <- notificationInfo: + case <-doneCh: + return + } + } + } + // Look for any underlying errors. + if err = bio.Err(); err != nil { + // For an unexpected connection drop from server, we close the body + // and re-connect. + if err == io.ErrUnexpectedEOF { + resp.Body.Close() + } + } + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-presigned.go b/src/server/vendor/github.com/minio/minio-go/api-presigned.go new file mode 100644 index 00000000000..f9d05ab9b8d --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-presigned.go @@ -0,0 +1,180 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "errors" + "net/url" + "time" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" +) + +// supportedGetReqParams - supported request parameters for GET presigned request. +var supportedGetReqParams = map[string]struct{}{ + "response-expires": {}, + "response-content-type": {}, + "response-cache-control": {}, + "response-content-language": {}, + "response-content-encoding": {}, + "response-content-disposition": {}, +} + +// presignURL - Returns a presigned URL for an input 'method'. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + // Input validation. + if method == "" { + return nil, ErrInvalidArgument("method cannot be empty.") + } + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, err + } + if err := isValidExpiry(expires); err != nil { + return nil, err + } + + // Convert expires into seconds. + expireSeconds := int64(expires / time.Second) + reqMetadata := requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + } + + // For "GET" we are handling additional request parameters to + // override its response headers. + if method == "GET" { + // Verify if input map has unsupported params, if yes exit. + for k := range reqParams { + if _, ok := supportedGetReqParams[k]; !ok { + return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.") + } + } + // Save the request parameters to be used in presigning for GET request. + reqMetadata.queryValues = reqParams + } + + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + req, err := c.newRequest(method, reqMetadata) + if err != nil { + return nil, err + } + return req.URL, nil +} + +// PresignedGetObject - Returns a presigned URL to access an object +// without credentials. 
+// Expires maximum is 7 days (i.e. 604800 seconds) and
+// minimum is 1 second. Additionally you can override a set of response
+// headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL("GET", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
+// Expires maximum is 7 days (i.e. 604800 seconds) and minimum is 1 second.
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+ return c.presignURL("PUT", bucketName, objectName, expires, nil)
+}
+
+// PresignedPostPolicy - Returns a POST URL and form data that can be used to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+ // Validate input arguments.
+ if p.expiration.IsZero() {
+ return nil, nil, errors.New("Expiration time must be specified")
+ }
+ if _, ok := p.formData["key"]; !ok {
+ return nil, nil, errors.New("object key must be specified")
+ }
+ if _, ok := p.formData["bucket"]; !ok {
+ return nil, nil, errors.New("bucket name must be specified")
+ }
+
+ bucketName := p.formData["bucket"]
+ // Fetch the bucket location.
+ location, err := c.getBucketLocation(bucketName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err = c.makeTargetURL(bucketName, "", location, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Keep time.
+ t := time.Now().UTC()
+ // Handle signature version '2' here.
+ if c.signature.isV2() {
+ policyBase64 := p.base64()
+ p.formData["policy"] = policyBase64
+ // For Google endpoint set this value to be 'GoogleAccessId'.
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ p.formData["GoogleAccessId"] = c.accessKeyID
+ } else {
+ // For all other endpoints set this value to be 'AWSAccessKeyId'.
+ p.formData["AWSAccessKeyId"] = c.accessKeyID
+ }
+ // Sign the policy.
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
+ return u, p.formData, nil
+ }
+
+ // Add date policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-date",
+ value: t.Format(iso8601DateFormat),
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add algorithm policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-algorithm",
+ value: signV4Algorithm,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add a credential policy.
+ credential := s3signer.GetCredential(c.accessKeyID, location, t)
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-credential",
+ value: credential,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Get base64 encoded policy.
+ policyBase64 := p.base64()
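A browser-upload sketch built on PresignedPostPolicy, assuming this snapshot's NewPostPolicy constructor and its SetBucket/SetKey/SetExpires setters along with hypothetical names; the bucket and key form fields satisfy the validation at the top of the function:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/minio/minio-go"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        p := minio.NewPostPolicy()
        if err := p.SetBucket("mybucket"); err != nil {
            log.Fatal(err)
        }
        if err := p.SetKey("uploads/photo.jpg"); err != nil {
            log.Fatal(err)
        }
        if err := p.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
            log.Fatal(err)
        }
        u, formData, err := c.PresignedPostPolicy(p)
        if err != nil {
            log.Fatal(err)
        }
        // A browser form would POST these fields, plus the file, to u.
        fmt.Println("POST to:", u)
        for k, v := range formData {
            fmt.Printf("  %s=%s\n", k, v)
        }
    }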
+ p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + return u, p.formData, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-bucket.go b/src/server/vendor/github.com/minio/minio-go/api-put-bucket.go new file mode 100644 index 00000000000..7c7e03f49fd --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -0,0 +1,293 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + + "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/s3signer" +) + +/// Bucket operations + +// MakeBucket creates a new bucket with bucketName. +// +// Location is an optional argument, by default all buckets are +// created in US Standard Region. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c Client) MakeBucket(bucketName string, location string) error { + // Validate the input arguments. + if err := isValidBucketName(bucketName); err != nil { + return err + } + + // If location is empty, treat is a default region 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Instantiate the request. + req, err := c.makeBucketRequest(bucketName, location) + if err != nil { + return err + } + + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Save the location into cache on a successful makeBucket response. + c.bucketLocCache.Set(bucketName, location) + + // Return. + return nil +} + +// makeBucketRequest constructs request for makeBucket. +func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + + // In case of Amazon S3. The make bucket issued on already + // existing bucket would fail with 'AuthorizationMalformed' error + // if virtual style is used. So we default to 'path style' as that + // is the preferred method here. The final location of the + // 'bucket' is provided through XML LocationConstraint data with + // the request. + targetURL := c.endpointURL + targetURL.Path = path.Join(bucketName, "") + "/" + + // get a new HTTP request for the method. 
+ req, err := http.NewRequest("PUT", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + } + + // If location is not 'us-east-1' create bucket location config. + if location != "us-east-1" && location != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return nil, err + } + createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) + req.Body = ioutil.NopCloser(createBucketConfigBuffer) + req.ContentLength = int64(len(createBucketConfigBytes)) + // Set content-md5. + req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes))) + if c.signature.isV4() { + // Set sha256. + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes))) + } + } + + // Sign the request. + if c.signature.isV4() { + // Signature calculated for MakeBucket request should be for 'us-east-1', + // regardless of the bucket's location constraint. + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + } else if c.signature.isV2() { + req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) + } + + // Return signed request. + return req, nil +} + +// SetBucketPolicy set the access permissions on an existing bucket. +// +// For example +// +// none - owner gets full access [default]. +// readonly - anonymous get access for everyone at a given object prefix. +// readwrite - anonymous list/put/delete access to a given object prefix. +// writeonly - anonymous put/delete access to a given object prefix. +func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return err + } + if !bucketPolicy.IsValidBucketPolicy() { + return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy)) + } + policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) + if err != nil { + return err + } + + if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil { + // As the request is for removing policy and the bucket + // has empty policy statements, just return success. + return nil + } + + policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix) + + // Save the updated policies. + return c.putBucketPolicy(bucketName, policyInfo) +} + +// Saves a new bucket policy. +func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + + // If there are no policy statements, we should remove entire policy. + if len(policyInfo.Statements) == 0 { + return c.removeBucketPolicy(bucketName) + } + + // Get resources properly escaped and lined up before + // using them in http request. 
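A caller-side sketch of SetBucketPolicy above, granting anonymous read access under a hypothetical prefix using the exported constants from the policy package:

    package main

    import (
        "log"

        "github.com/minio/minio-go"
        "github.com/minio/minio-go/pkg/policy"
    )

    func main() {
        c, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
        if err != nil {
            log.Fatal(err)
        }
        // Grant anonymous read access to everything under "public/".
        if err := c.SetBucketPolicy("mybucket", "public", policy.BucketPolicyReadOnly); err != nil {
            log.Fatal(err)
        }
    }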
+ urlValues := make(url.Values) + urlValues.Set("policy", "") + + policyBytes, err := json.Marshal(&policyInfo) + if err != nil { + return err + } + + policyBuffer := bytes.NewReader(policyBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: policyBuffer, + contentLength: int64(len(policyBytes)), + contentMD5Bytes: sumMD5(policyBytes), + contentSHA256Bytes: sum256(policyBytes), + } + + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c Client) removeBucketPolicy(bucketName string) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod("DELETE", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// SetBucketNotification saves a new bucket notification. +func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(bucketNotification) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Bytes: sumMD5(notifBytes), + contentSHA256Bytes: sum256(notifBytes), + } + + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c Client) RemoveAllBucketNotification(bucketName string) error { + return c.SetBucketNotification(bucketName, BucketNotification{}) +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-common.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-common.go new file mode 100644 index 00000000000..5f5f568e6fe --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -0,0 +1,251 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+)
+
+// Verify if reader is *os.File
+func isFile(reader io.Reader) (ok bool) {
+ _, ok = reader.(*os.File)
+ return
+}
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+ _, ok = reader.(*Object)
+ return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+ _, ok = reader.(io.ReaderAt)
+ return
+}
+
+// shouldUploadPart - verify if part should be uploaded.
+func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
+ // If part not found, should upload the part.
+ if uploadReq.Part == nil {
+ return true
+ }
+ // If size mismatches, should upload the part.
+ if objPart.Size != uploadReq.Part.Size {
+ return true
+ }
+ // If md5sum mismatches, should upload the part.
+ if objPart.ETag != uploadReq.Part.ETag {
+ return true
+ }
+ return false
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: The assumption here is that for any object uploaded to any S3-compatible
+// object storage, the following parameters are constants.
+//
+// maxPartsCount - 10000
+// minPartSize - 64MiB
+// maxMultipartPutObjectSize - 5TiB
+//
+func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+ // If object size is '-1', set it to 5TiB.
+ if objectSize == -1 {
+ objectSize = maxMultipartPutObjectSize
+ }
+ // Error if object size is larger than the supported maximum.
+ if objectSize > maxMultipartPutObjectSize {
+ err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ return
+ }
+ // Use floats for part size for all calculations to avoid
+ // overflows during float64 to int64 conversions.
+ partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount))
+ partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
+ // Total parts count.
+ totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+ // Part size.
+ partSize = int64(partSizeFlt)
+ // Last part size.
+ lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+ return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// hashCopyBuffer is identical to hashCopyN except that it doesn't take
+// a size argument but takes a buffer argument, and the reader must
+// implement the io.ReaderAt interface.
+//
+// Stages reads from offsets into the buffer; if the buffer is nil it is
+// initialized to optimalReadBufferSize bytes.
+func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Buffer is nil, initialize.
+ if buf == nil {
+ buf = make([]byte, optimalReadBufferSize)
+ }
+
+ // Offset to start reading from.
+ var readAtOffset int64
+
+ // The following block reads data at an offset from the input
+ // reader and copies it into the destination writer.
+ for {
+ readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ return 0, rerr
+ }
+ }
+ writeSize, werr := hashWriter.Write(buf[:readAtSize])
+ if werr != nil {
+ return 0, werr
+ }
+ if readAtSize != writeSize {
+ return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
+ }
+ readAtOffset += int64(writeSize)
+ size += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// hashCopyN - Calculates chosen hashes up to partSize bytes.
+func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Copy partSize bytes from reader into the hash-teed writer.
+ size, err = io.CopyN(hashWriter, reader, partSize)
+ if err != nil {
+ // If not EOF, return the error right here.
+ if err != io.EOF {
+ return 0, err
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// newUploadID - initiates a new multipart upload and fetches a fresh upload id.
+func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", err
+ }
+
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+ if err != nil {
+ return "", err
+ }
+ return initMultipartUploadResult.UploadID, nil
+}
+
+// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session,
+// or initiates a new multipart session if no current one is found.
+func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
+ // A map of all uploaded parts.
+ var partsInfo map[int]objectPart
+ var err error
+
+ uploadID, err := c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return "", nil, err
+ }
+
+ if uploadID == "" {
+ // Initiates a new multipart request.
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ // Fetch previously uploaded parts and maximum part size.
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ // When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload id,
+ // initiate a new multipart upload.
+ if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ return "", nil, err
+ }
+ }
+ }
+
+ // Allocate partsInfo if not done yet.
+ if partsInfo == nil {
+ partsInfo = make(map[int]objectPart)
+ }
+
+ return uploadID, partsInfo, nil
+}
+
+// computeHash - Calculates hashes for an input read seeker.
+func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
+ hashWriter := ioutil.Discard
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // No staging buffer is needed here, just use io.Copy.
+ size, err = io.Copy(hashWriter, reader)
+ if err != nil {
+ return 0, err
+ }
+
+ // Seek back reader to the beginning location.
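+ // (Seek(0, 0) rewinds to offset 0 relative to the start of the stream,
+ // i.e. whence 0 is io.SeekStart, so the caller can re-read the same
+ // payload for the actual upload.)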
+ if _, err := reader.Seek(0, 0); err != nil { + return 0, err + } + + for k, v := range hashAlgorithms { + hashSums[k] = v.Sum(nil) + } + return size, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-copy.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-copy.go new file mode 100644 index 00000000000..56978d4270e --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -0,0 +1,72 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// CopyObject - copy a source object into a new object with the provided name in the provided bucket +func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + if objectSource == "" { + return ErrInvalidArgument("Object source cannot be empty.") + } + + // customHeaders apply headers. + customHeaders := make(http.Header) + for _, cond := range cpCond.conditions { + customHeaders.Set(cond.key, cond.value) + } + + // Set copy source. + customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource)) + + // Execute PUT on objectName. + resp, err := c.executeMethod("PUT", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeaders, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Decode copy response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return err + } + + // Return nil on success. + return nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-file.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-file.go new file mode 100644 index 00000000000..aa554b321ec --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -0,0 +1,308 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "mime"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath.
+func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+ // Fail quickly on any error here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Check for largest object size allowed.
+ if fileSize > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
+ }
+
+ objMetadata := make(map[string][]string)
+
+ // Set contentType based on the filepath extension if not given, falling
+ // back to "application/octet-stream" if the extension has no associated type.
+ if contentType == "" {
+ if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ }
+
+ objMetadata["Content-Type"] = []string{contentType}
+
+ // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+ // The current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+ }
+
+ // NOTE: S3 doesn't allow anonymous multipart requests.
+ if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for anonymous requests to Amazon
+ // S3. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+ }
+
+ // A single-part upload is initiated for input data smaller than minPartSize.
+ if fileSize < minPartSize && fileSize >= 0 {
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+ }
+ // Upload all large objects as multipart.
+ n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // If multipart functionality is not available, fall back
+ // to a single PutObject operation.
+ if errResp.Code == "NotImplemented" {
+ // If size of file is greater than '5GiB' fail.
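+ // (maxSinglePutObjectSize caps a single PUT at 5GiB; anything larger can
+ // only travel as multipart, and the backend just reported that multipart
+ // is not implemented.)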
+ if fileSize > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) + } + return n, err + } + return n, nil +} + +// putObjectMultipartFromFile - Creates object from contents of *os.File +// +// NOTE: This function is meant to be used for readers with local +// file as in *os.File. This function resumes by skipping all the +// necessary parts which were already uploaded by verifying them +// against MD5SUM of each individual parts. This function also +// effectively utilizes file system capabilities of reading from +// specific sections and not having to create temporary files. +func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize) + if err != nil { + return 0, err + } + + // Create a channel to communicate a part was uploaded. + // Buffer this to 10000, the maximum number of parts allowed by S3. + uploadedPartsCh := make(chan uploadedPartRes, 10000) + + // Create a channel to communicate which part to upload. + // Buffer this to 10000, the maximum number of parts allowed by S3. + uploadPartsCh := make(chan uploadPartReq, 10000) + + // Just for readability. + lastPartNumber := totalPartsCount + + // Send each part through the partUploadCh to be uploaded. + for p := 1; p <= totalPartsCount; p++ { + part, ok := partsInfo[p] + if ok { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} + } else { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} + } + } + close(uploadPartsCh) + + // Use three 'workers' to upload parts in parallel. + for w := 1; w <= 3; w++ { + go func() { + // Deal with each part as it comes through the channel. + for uploadReq := range uploadPartsCh { + // Add hash algorithms that need to be calculated by computeHash() + // In case of a non-v4 signature or https connection, sha256 is not needed. + hashAlgos := make(map[string]hash.Hash) + hashSums := make(map[string][]byte) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + missingPartSize := partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. 
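+ // (Worked illustration, assuming partSize = 64MiB and a 100MiB file:
+ // optimalPartInfo yields two parts, so part 1 reads 64MiB from offset 0
+ // and the last part reads the remaining 36MiB from offset
+ // fileSize - lastPartSize = 64MiB.)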
+ if uploadReq.PartNum == lastPartNumber { + readOffset = (fileSize - lastPartSize) + missingPartSize = lastPartSize + } + + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize) + var prtSize int64 + var err error + + prtSize, err = computeHash(hashAlgos, hashSums, sectionReader) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Error: err, + } + // Exit the goroutine. + return + } + + // Create the part to be uploaded. + verifyObjPart := objectPart{ + ETag: hex.EncodeToString(hashSums["md5"]), + PartNumber: uploadReq.PartNum, + Size: partSize, + } + + // If this is the last part do not give it the full part size. + if uploadReq.PartNum == lastPartNumber { + verifyObjPart.Size = lastPartSize + } + + // Verify if part should be uploaded. + if shouldUploadPart(verifyObjPart, uploadReq) { + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Error: err, + } + // Exit the goroutine. + return + } + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + } + // Return through the channel the part size. + uploadedPartsCh <- uploadedPartRes{ + Size: verifyObjPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, + } + } + }() + } + + // Retrieve each uploaded part once it is done. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error + } + // Retrieve each uploaded part and store it to be completed. + part := uploadRes.Part + if part == nil { + return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + } + // Update the total uploaded size. + totalUploadedSize += uploadRes.Size + // Update the progress bar if there is one. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { + return totalUploadedSize, err + } + } + // Store the part to be completed. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Verify if we uploaded all data. + if totalUploadedSize != fileSize { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-multipart.go new file mode 100644 index 00000000000..f74eae62654 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -0,0 +1,385 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Comprehensive put object operation involving multipart resumable uploads.
+//
+// The following code handles these types of readers.
+//
+// - *os.File
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+// If we exhaust all the known types, the code proceeds to use the stream
+// as is, where each part is staged, checksummed and verified
+// before upload.
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ if size > 0 && size > minPartSize {
+ // Verify if reader is *os.File, then use file system functionalities.
+ if isFile(reader) {
+ return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
+ }
+ // Verify if reader is *minio.Object or io.ReaderAt.
+ // NOTE: Verification of *minio.Object is kept for a specific purpose
+ // even though it is duck typed similar to io.ReaderAt:
+ // it indicates that *minio.Object implements io.ReaderAt,
+ // and that functionality is used in the subsequent code
+ // path.
+ if isObject(reader) || isReadAt(reader) {
+ return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
+ }
+ }
+ // For any other data size and reader type we take a generic multipart
+ // approach by staging data in temporary buffers and uploading them.
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
+}
+
+// putObjectMultipartStream uploads objects bigger than 64MiB, and also supports
+// the special case where size is unknown, i.e. '-1'.
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload.
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+ if err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize a temporary buffer.
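+ // (The buffer stages at most one part, i.e. partSize bytes, in memory per
+ // loop iteration and is reset once the part has been handled.)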
+ tmpBuffer := new(bytes.Buffer) + + for partNumber <= totalPartsCount { + // Choose hash algorithms to be calculated by hashCopyN, avoid sha256 + // with non-v4 signature request or HTTPS connection + hashSums := make(map[string][]byte) + hashAlgos := make(map[string]hash.Hash) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + + // Calculates hash sums while copying partSize bytes into tmpBuffer. + prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize) + if rErr != nil { + if rErr != io.EOF { + return 0, rErr + } + } + + var reader io.Reader + // Update progress reader appropriately to the latest offset + // as we read from the source. + reader = newHook(tmpBuffer, progress) + + part, ok := partsInfo[partNumber] + + // Verify if part should be uploaded. + if !ok || shouldUploadPart(objectPart{ + ETag: hex.EncodeToString(hashSums["md5"]), + PartNumber: partNumber, + Size: prtSize, + }, uploadPartReq{PartNum: partNumber, Part: &part}) { + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + // Reset the temporary buffer upon any error. + tmpBuffer.Reset() + return totalUploadedSize, err + } + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + } else { + // Update the progress reader for the skipped part. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { + return totalUploadedSize, err + } + } + } + + // Reset the temporary buffer. + tmpBuffer.Reset() + + // Save successfully uploaded size. + totalUploadedSize += prtSize + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if size < 0 && rErr == io.EOF { + break + } + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + // Set ContentType header. 
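+ // (Each metadata key contributes its first value as a request header; for
+ // example, metaData["Content-Type"] = []string{"text/plain"} becomes the
+ // header "Content-Type: text/plain" on the initiate request.)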
+ customHeader := make(http.Header) + for k, v := range metaData { + if len(v) > 0 { + customHeader.Set(k, v[0]) + } + } + + // Set a default content-type header if the latter is not provided + if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { + customHeader.Set("Content-Type", "application/octet-stream") + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod("POST", reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return objectPart{}, err + } + if err := isValidObjectName(objectName); err != nil { + return objectPart{}, err + } + if size > maxPartSize { + return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, + } + + // Execute PUT on each part. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return objectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := objectPart{} + objPart.Size = size + objPart.PartNumber = partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { + // Input validation. 
+ if err := isValidBucketName(bucketName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Marshal complete multipart body.
+ completeMultipartUploadBytes, err := xml.Marshal(complete)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Instantiate the complete multipart upload request buffer.
+ completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Bytes: sum256(completeMultipartUploadBytes),
+ }
+
+ // Execute POST to complete multipart upload for an objectName.
+ resp, err := c.executeMethod("POST", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Read resp.Body into a []byte to parse for an Error response inside the body.
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ // Decode completed multipart upload response on success.
+ completeMultipartUploadResult := completeMultipartUploadResult{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment.
+ return completeMultipartUploadResult, err
+ } else if completeMultipartUploadResult.Bucket == "" {
+ // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+ // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+ // of the members.
+
+ // Decode completed multipart upload response on failure.
+ completeMultipartUploadErr := ErrorResponse{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment.
+ return completeMultipartUploadResult, err
+ }
+ return completeMultipartUploadResult, completeMultipartUploadErr
+ }
+ return completeMultipartUploadResult, nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-progress.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-progress.go
new file mode 100644
index 00000000000..42f8ce4d1cd
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-progress.go
@@ -0,0 +1,117 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "io" + "strings" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// PutObjectWithProgress - with progress. +func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) { + metaData := make(map[string][]string) + metaData["Content-Type"] = []string{contentType} + return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress) +} + +// PutObjectWithMetadata - with metadata. +func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if reader == nil { + return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.") + } + + // Size of the object. + var size int64 + + // Get reader size. + size, err = getReaderSize(reader) + if err != nil { + return 0, err + } + + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. + // So we fall back to single PUT operation with the maximum limit of 5GiB. + if s3utils.IsGoogleEndpoint(c.endpointURL) { + if size <= -1 { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.", + Key: objectName, + BucketName: bucketName, + } + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) + } + + // NOTE: S3 doesn't allow anonymous multipart requests. + if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous { + if size <= -1 { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "Content-Length cannot be negative for anonymous requests.", + Key: objectName, + BucketName: bucketName, + } + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Do not compute MD5 for anonymous requests to Amazon + // S3. Uploads up to 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) + } + + // putSmall object. + if size < minPartSize && size >= 0 { + return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) + } + // For all sizes greater than 5MiB do multipart. + n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. 
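+ // (Presumably for backends that reject multipart requests with AccessDenied
+ // rather than NotImplemented; the fallback is only safe for sizes up to
+ // maxSinglePutObjectSize, which was checked above.)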
+ return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
+ }
+ return n, err
+ }
+ return n, nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object-readat.go b/src/server/vendor/github.com/minio/minio-go/api-put-object-readat.go
new file mode 100644
index 00000000000..4ab1095f640
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-put-object-readat.go
@@ -0,0 +1,247 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "sort"
+)
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part *objectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part to upload.
+ Part *objectPart // Existing part info, nil if the part was not uploaded before.
+}
+
+// shouldUploadPartReadAt - verify if part should be uploaded.
+func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
+ // If part not found, part should be uploaded.
+ if uploadReq.Part == nil {
+ return true
+ }
+ // If size mismatches, part should be uploaded.
+ if uploadReq.Part.Size != objPart.Size {
+ return true
+ }
+ return false
+}
+
+// putObjectMultipartFromReadAt - Uploads objects bigger than minPartSize.
+// Supports readers that implement the io.ReaderAt interface (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding a re-read of data
+// which was already uploaded. Internally this function stages
+// each part in a temporary buffer before uploading it.
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload.
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+ if err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
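+ // (Worked illustration of optimalPartInfo, assuming the documented
+ // constants minPartSize = 64MiB and maxPartsCount = 10000: a 1GiB object
+ // yields totalPartsCount = 16 with partSize = lastPartSize = 64MiB.)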
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Used for readability, lastPartNumber is always totalPartsCount. + lastPartNumber := totalPartsCount + + // Declare a channel that sends the next part number to be uploaded. + // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadPartsCh := make(chan uploadPartReq, 10000) + + // Declare a channel that sends back the response of a part upload. + // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadedPartsCh := make(chan uploadedPartRes, 10000) + + // Send each part number to the channel to be processed. + for p := 1; p <= totalPartsCount; p++ { + part, ok := partsInfo[p] + if ok { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} + } else { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} + } + } + close(uploadPartsCh) + + // Receive each part number from the channel allowing three parallel uploads. + for w := 1; w <= 3; w++ { + go func() { + // Read defaults to reading at 5MiB buffer. + readAtBuffer := make([]byte, optimalReadBufferSize) + + // Each worker will draw from the part channel and upload in parallel. + for uploadReq := range uploadPartsCh { + // Declare a new tmpBuffer. + tmpBuffer := new(bytes.Buffer) + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + missingPartSize := partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if uploadReq.PartNum == lastPartNumber { + readOffset = (size - lastPartSize) + missingPartSize = lastPartSize + } + + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) + + // Choose the needed hash algorithms to be calculated by hashCopyBuffer. + // Sha256 is avoided in non-v4 signature requests or HTTPS connections + hashSums := make(map[string][]byte) + hashAlgos := make(map[string]hash.Hash) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + + var prtSize int64 + var err error + prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer) + if err != nil { + // Send the error back through the channel. + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } + + // Verify object if its uploaded. + verifyObjPart := objectPart{ + PartNumber: uploadReq.PartNum, + Size: partSize, + } + // Special case if we see a last part number, save last part + // size as the proper part size. + if uploadReq.PartNum == lastPartNumber { + verifyObjPart.Size = lastPartSize + } + + // Only upload the necessary parts. Otherwise return size through channel + // to update any progress bar. + if shouldUploadPartReadAt(verifyObjPart, uploadReq) { + // Proceed to upload the part. + var objPart objectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + } + // Send successful part info through the channel. 
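+ // (Parts skipped above because they were already uploaded are reported
+ // through the channel as well, so the progress accounting and the final
+ // completion list stay accurate.)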
+ uploadedPartsCh <- uploadedPartRes{ + Size: verifyObjPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, + } + } + }() + } + + // Gather the responses as they occur and update any + // progress bar. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error + } + // Retrieve each uploaded part and store it to be completed. + // part, ok := partsInfo[uploadRes.PartNum] + part := uploadRes.Part + if part == nil { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + } + // Update the totalUploadedSize. + totalUploadedSize += uploadRes.Size + // Update the progress bar if there is one. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { + return totalUploadedSize, err + } + } + // Store the parts to be completed in order. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/api-put-object.go b/src/server/vendor/github.com/minio/minio-go/api-put-object.go new file mode 100644 index 00000000000..a779fbebedc --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/api-put-object.go @@ -0,0 +1,321 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "hash" + "io" + "io/ioutil" + "net/http" + "os" + "reflect" + "runtime" + "strings" +) + +// toInt - converts go value to its integer representation based +// on the value kind if it is an integer. +func toInt(value reflect.Value) (size int64) { + size = -1 + if value.IsValid() { + switch value.Kind() { + case reflect.Int: + fallthrough + case reflect.Int8: + fallthrough + case reflect.Int16: + fallthrough + case reflect.Int32: + fallthrough + case reflect.Int64: + size = value.Int() + } + } + return size +} + +// getReaderSize - Determine the size of Reader if available. +func getReaderSize(reader io.Reader) (size int64, err error) { + size = -1 + if reader == nil { + return -1, nil + } + // Verify if there is a method by name 'Size'. + sizeFn := reflect.ValueOf(reader).MethodByName("Size") + // Verify if there is a method by name 'Len'. 
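+ // (bytes.Buffer, bytes.Reader and strings.Reader, for example, expose
+ // Len() int; *os.File exposes neither Size nor Len and is handled by the
+ // Stat fallback below.)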
+ lenFn := reflect.ValueOf(reader).MethodByName("Len")
+ if sizeFn.IsValid() {
+ if sizeFn.Kind() == reflect.Func {
+ // Call the 'Size' function and save its return value.
+ result := sizeFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
+ }
+ }
+ } else if lenFn.IsValid() {
+ if lenFn.Kind() == reflect.Func {
+ // Call the 'Len' function and save its return value.
+ result := lenFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
+ }
+ }
+ } else {
+ // Fall back to the Stat() method; two possible Stat() structs exist.
+ switch v := reader.(type) {
+ case *os.File:
+ var st os.FileInfo
+ st, err = v.Stat()
+ if err != nil {
+ // Handle this case specially for "windows":
+ // for certain files, for example 'Stdin', 'Stdout' and
+ // 'Stderr', it is not allowed to fetch file information.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(err.Error(), "GetFileInformationByHandle") {
+ return -1, nil
+ }
+ }
+ return
+ }
+ // If the input is a directory, throw an error.
+ if st.Mode().IsDir() {
+ return -1, ErrInvalidArgument("Input file cannot be a directory.")
+ }
+ // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
+ // represent the *os.File type but internally do not
+ // implement seekable calls. Treat them like
+ // streams with unknown length.
+ switch st.Name() {
+ case "stdin", "stdout", "stderr":
+ return
+ // Ignore the read/write ends of os.Pipe(), which have unknown length too.
+ case "|0", "|1":
+ return
+ }
+ size = st.Size()
+ case *Object:
+ var st ObjectInfo
+ st, err = v.Stat()
+ if err != nil {
+ return
+ }
+ size = st.Size
+ }
+ }
+ // Return the size here.
+ return size, err
+}
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []completePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than minPartSize PutObject automatically does a single atomic Put operation.
+// - For size larger than minPartSize PutObject automatically does a resumable multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
+// Maximum object size that can be uploaded through this operation will be 5TiB.
+//
+// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
+// So we fall back to single PUT operation with the maximum limit of 5GiB.
+//
+// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
+func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
+}
+
+// putObjectNoChecksum is a special function used for Google Cloud Storage,
+// since Google's multipart API is not S3 compatible.
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + readSeeker := newHook(reader, progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. + st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectSingle is a special function for uploading single put object request. +// This special function is used as a fallback when multipart upload fails. +func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // If size is a stream, upload up to 5GiB. + if size <= -1 { + size = maxSinglePutObjectSize + } + + // Add the appropriate hash algorithms that need to be calculated by hashCopyN + // In case of non-v4 signature request or HTTPS connection, sha256 is not needed. + hashAlgos := make(map[string]hash.Hash) + hashSums := make(map[string][]byte) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + + if size <= minPartSize { + // Initialize a new temporary buffer. + tmpBuffer := new(bytes.Buffer) + size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size) + reader = bytes.NewReader(tmpBuffer.Bytes()) + tmpBuffer.Reset() + } else { + // Initialize a new temporary file. + var tmpFile *tempFile + tmpFile, err = newTempFile("single$-putobject-single") + if err != nil { + return 0, err + } + defer tmpFile.Close() + size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) + if err != nil { + return 0, err + } + // Seek back to beginning of the temporary file. + if _, err = tmpFile.Seek(0, 0); err != nil { + return 0, err + } + reader = tmpFile + } + // Return error if its not io.EOF. + if err != nil { + if err != io.EOF { + return 0, err + } + } + // Execute put object. + st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + // Progress the reader to the size if putObjectDo is successful. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil { + return size, err + } + } + return size, nil +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { + // Input validation. 
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return ObjectInfo{}, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return ObjectInfo{}, err
+	}
+
+	if size <= -1 {
+		return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
+	}
+
+	if size > maxSinglePutObjectSize {
+		return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+	}
+
+	// Set headers.
+	customHeader := make(http.Header)
+
+	// Set metadata to headers.
+	for k, v := range metaData {
+		if len(v) > 0 {
+			customHeader.Set(k, v[0])
+		}
+	}
+
+	// If Content-Type is not provided, set the default application/octet-stream.
+	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+		customHeader.Set("Content-Type", "application/octet-stream")
+	}
+
+	// Populate request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:         bucketName,
+		objectName:         objectName,
+		customHeader:       customHeader,
+		contentBody:        reader,
+		contentLength:      size,
+		contentMD5Bytes:    md5Sum,
+		contentSHA256Bytes: sha256Sum,
+	}
+
+	// Execute PUT on objectName.
+	resp, err := c.executeMethod("PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return ObjectInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	var objInfo ObjectInfo
+	// Trim off the odd double quotes from ETag in the beginning and end.
+	objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
+	// A success here means data was written to the server successfully.
+	objInfo.Size = size
+
+	// Return here.
+	return objInfo, nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-remove.go b/src/server/vendor/github.com/minio/minio-go/api-remove.go
new file mode 100644
index 00000000000..2ca84458ef1
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,284 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// RemoveBucket deletes the named bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
+func (c Client) RemoveBucket(bucketName string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod("DELETE", requestMetadata{
+		bucketName: bucketName,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+
+	return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+	// Execute DELETE on objectName.
+	resp, err := c.executeMethod("DELETE", requestMetadata{
+		bucketName: bucketName,
+		objectName: objectName,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		// If some unexpected error happened and max retry is reached, we want to let the client know.
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// DeleteObject always responds with http '204' even for
+	// objects which do not exist. So no need to handle them
+	// specifically.
+	return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	Err        error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the remove multi objects request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+	rmObjects := []deleteObject{}
+	for _, obj := range objects {
+		rmObjects = append(rmObjects, deleteObject{Key: obj})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+	return xmlBytes
+}
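For reference, the helper above marshals two keys into a request body like the following (a sketch within package minio, since the helper is unexported; the deleteMultiObjects and deleteObject types appear in api-s3-datatypes.go below):

    body := generateRemoveMultiObjectsRequest([]string{"a.txt", "b.txt"})
    fmt.Println(string(body))
    // <Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>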
+
+// processRemoveMultiObjectsResponse - parse the remove multi objects web service response
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+		return
+	}
+
+	// Report deletions that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		errorCh <- RemoveObjectError{
+			ObjectName: obj.Key,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate if bucket name is valid.
+	if err := isValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate objects channel to be properly allocated.
+	if objectsCh == nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: ErrInvalidArgument("Objects channel cannot be nil"),
+		}
+		return errorCh
+	}
+
+	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+	go func(errorCh chan<- RemoveObjectError) {
+		maxEntries := 1000
+		finish := false
+		urlValues := make(url.Values)
+		urlValues.Set("delete", "")
+
+		// Close error channel when Multi delete finishes.
+		defer close(errorCh)
+
+		// Loop over entries by 1000 and call MultiDelete requests
+		for {
+			if finish {
+				break
+			}
+			count := 0
+			var batch []string
+
+			// Try to gather 1000 entries
+			for object := range objectsCh {
+				batch = append(batch, object)
+				if count++; count >= maxEntries {
+					break
+				}
+			}
+			if count == 0 {
+				// The Multi Objects Delete API doesn't accept an empty object list, quit immediately
+				break
+			}
+			if count < maxEntries {
+				// We didn't have 1000 entries, so this is the last batch
+				finish = true
+			}
+
+			// Generate remove multi objects XML request
+			removeBytes := generateRemoveMultiObjectsRequest(batch)
+			// Execute POST on bucket to remove objects.
+			resp, err := c.executeMethod("POST", requestMetadata{
+				bucketName:         bucketName,
+				queryValues:        urlValues,
+				contentBody:        bytes.NewReader(removeBytes),
+				contentLength:      int64(len(removeBytes)),
+				contentMD5Bytes:    sumMD5(removeBytes),
+				contentSHA256Bytes: sum256(removeBytes),
+			})
+			if err != nil {
+				for _, b := range batch {
+					errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+				}
+				continue
+			}
+
+			// Process multiobjects remove xml response
+			processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+			closeResponse(resp)
+		}
+	}(errorCh)
+	return errorCh
+}
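A usage sketch for the channel-based RemoveObjects API (illustrative; the client and names are placeholders):

    objectsCh := make(chan string)
    go func() {
        defer close(objectsCh) // closing the channel ends the final batch
        for _, name := range []string{"logs/a.gz", "logs/b.gz"} {
            objectsCh <- name
        }
    }()
    // Drain the error channel; it is closed once all batches are done.
    for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
        log.Println("failed to remove", rErr.ObjectName, rErr.Err)
    }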
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+	// Find multipart upload id of the object to be aborted.
+	uploadID, err := c.findUploadID(bucketName, objectName)
+	if err != nil {
+		return err
+	}
+	if uploadID != "" {
+		// Upload id found, abort the incomplete multipart upload.
+		err := c.abortMultipartUpload(bucketName, objectName, uploadID)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Initialize url queries.
+	urlValues := make(url.Values)
+	urlValues.Set("uploadId", uploadID)
+
+	// Execute DELETE on multipart upload.
+	resp, err := c.executeMethod("DELETE", requestMetadata{
+		bucketName:  bucketName,
+		objectName:  objectName,
+		queryValues: urlValues,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			// Abort has no response body, handle it for any errors.
+			var errorResponse ErrorResponse
+			switch resp.StatusCode {
+			case http.StatusNotFound:
+				// This is needed specifically for abort and it cannot
+				// be collapsed into the default case.
+				errorResponse = ErrorResponse{
+					Code:       "NoSuchUpload",
+					Message:    "The specified multipart upload does not exist.",
+					BucketName: bucketName,
+					Key:        objectName,
+					RequestID:  resp.Header.Get("x-amz-request-id"),
+					HostID:     resp.Header.Get("x-amz-id-2"),
+					Region:     resp.Header.Get("x-amz-bucket-region"),
+				}
+			default:
+				return httpRespToErrorResponse(resp, bucketName, objectName)
+			}
+			return errorResponse
+		}
+	}
+	return nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/src/server/vendor/github.com/minio/minio-go/api-s3-datatypes.go
new file mode 100644
index 00000000000..a34f82e9736
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -0,0 +1,244 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+	"time"
+)
+
+// listAllMyBucketsResult container for listBuckets response.
+type listAllMyBucketsResult struct {
+	// Container for one or more buckets.
+	Buckets struct {
+		Bucket []BucketInfo
+	}
+	Owner owner
+}
+
+// owner container for bucket owner information.
+type owner struct {
+	DisplayName string
+	ID          string
+}
+
+// commonPrefix container for prefix response.
+type commonPrefix struct {
+	Prefix string
+}
+
+// listBucketV2Result container for listObjects V2 response.
+type listBucketV2Result struct {
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []commonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	MaxKeys     int64
+	Name        string
+
+	// Holds the token that will be sent in the next request to fetch the next group of keys
+	NextContinuationToken string
+
+	ContinuationToken string
+	Prefix            string
+
+	// FetchOwner and StartAfter are currently not used
+	FetchOwner string
+	StartAfter string
+}
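These field names map one-to-one onto the S3 ListObjectsV2 XML, so a response body decodes directly (a hedged sketch within package minio, using a truncated sample payload):

    payload := []byte(`<ListBucketResult>
        <Name>my-bucket</Name>
        <IsTruncated>true</IsTruncated>
        <NextContinuationToken>token-123</NextContinuationToken>
    </ListBucketResult>`)
    var res listBucketV2Result
    if err := xml.Unmarshal(payload, &res); err != nil {
        log.Fatalln(err)
    }
    log.Println(res.Name, res.IsTruncated, res.NextContinuationToken)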
+
+// listBucketResult container for listObjects response.
+type listBucketResult struct {
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []commonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	Marker      string
+	MaxKeys     int64
+	Name        string
+
+	// When the response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as the marker in the subsequent request to get the next set of objects.
+	// Object storage lists objects in alphabetical order. Note: this
+	// element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include NextMarker
+	// and it is truncated, you can use the value of the last Key in
+	// the response as the marker in the subsequent request to get the
+	// next set of object keys.
+	NextMarker string
+	Prefix     string
+}
+
+// listMultipartUploadsResult container for ListMultipartUploads response
+type listMultipartUploadsResult struct {
+	Bucket             string
+	KeyMarker          string
+	UploadIDMarker     string `xml:"UploadIdMarker"`
+	NextKeyMarker      string
+	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+	EncodingType       string
+	MaxUploads         int64
+	IsTruncated        bool
+	Uploads            []ObjectMultipartInfo `xml:"Upload"`
+	Prefix             string
+	Delimiter          string
+	// A response can contain CommonPrefixes only if you specify a delimiter.
+	CommonPrefixes []commonPrefix
+}
+
+// initiator container for who initiated the multipart upload.
+type initiator struct {
+	ID          string
+	DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+	ETag         string
+	LastModified string // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// objectPart container for a particular part of an object.
+type objectPart struct {
+	// Part number identifies the part.
+	PartNumber int
+
+	// Date and time the part was uploaded.
+	LastModified time.Time
+
+	// Entity tag returned when the part was uploaded, usually md5sum
+	// of the part.
+	ETag string
+
+	// Size of the uploaded part data.
+	Size int64
+}
+
+// listObjectPartsResult container for ListObjectParts response.
+type listObjectPartsResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+
+	Initiator initiator
+	Owner     owner
+
+	StorageClass         string
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated bool
+	ObjectParts []objectPart `xml:"Part"`
+
+	EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// completePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type completePart struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+	// Part number identifies the part.
+	PartNumber int
+	ETag       string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+	Parts   []completePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+	Location string   `xml:"LocationConstraint"`
+}
+
+// deleteObject container for the Delete element in the MultiObjects Delete XML request
+type deleteObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for the Deleted element in the MultiObjects Delete XML response
+type deletedObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+	// These fields are ignored.
+	DeleteMarker          bool
+	DeleteMarkerVersionID string
+}
+
+// nonDeletedObject container for the Error element (failed deletion) in the MultiObjects Delete XML response
+type nonDeletedObject struct {
+	Key     string
+	Code    string
+	Message string
+}
+
+// deleteMultiObjects container for the MultiObjects Delete XML request
+type deleteMultiObjects struct {
+	XMLName xml.Name       `xml:"Delete"`
+	Quiet   bool
+	Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for the MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+	XMLName          xml.Name           `xml:"DeleteResult"`
+	DeletedObjects   []deletedObject    `xml:"Deleted"`
+	UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/api-stat.go b/src/server/vendor/github.com/minio/minio-go/api-stat.go
new file mode 100644
index 00000000000..e3bb115d4fc
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api-stat.go
@@ -0,0 +1,158 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// BucketExists verifies if the bucket exists and you have permission to access it.
+func (c Client) BucketExists(bucketName string) (bool, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return false, err
+	}
+
+	// Execute HEAD on bucketName.
+	resp, err := c.executeMethod("HEAD", requestMetadata{
+		bucketName: bucketName,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		if ToErrorResponse(err).Code == "NoSuchBucket" {
+			return false, nil
+		}
+		return false, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return false, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return true, nil
+}
+
+// List of header keys to be filtered, usually
+// from all S3 API http responses.
+var defaultFilterKeys = []string{
+	"Transfer-Encoding",
+	"Accept-Ranges",
+	"Date",
+	"Server",
+	"Vary",
+	"x-amz-request-id",
+	"x-amz-id-2",
+	// Add new headers to be ignored.
+}
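A usage sketch for BucketExists (illustrative client and bucket name):

    found, err := client.BucketExists("my-bucket")
    if err != nil {
        log.Fatalln(err)
    }
    if !found {
        log.Println("bucket does not exist")
    }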
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+	filterKeys := append([]string{
+		"ETag",
+		"Content-Length",
+		"Last-Modified",
+		"Content-Type",
+	}, defaultFilterKeys...)
+	return filterHeader(header, filterKeys)
+}
+
+// StatObject verifies if the object exists and you have permission to access it.
+func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return ObjectInfo{}, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return ObjectInfo{}, err
+	}
+
+	// Execute HEAD on objectName.
+	resp, err := c.executeMethod("HEAD", requestMetadata{
+		bucketName: bucketName,
+		objectName: objectName,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ObjectInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// Trim off the odd double quotes from ETag in the beginning and end.
+	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	md5sum = strings.TrimSuffix(md5sum, "\"")
+
+	// Content-Length is not valid for Google Cloud Storage, do not verify.
+	var size int64 = -1
+	if !s3utils.IsGoogleEndpoint(c.endpointURL) {
+		// Parse content length.
+		size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+		if err != nil {
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    "Content-Length is invalid. " + reportIssue,
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  resp.Header.Get("x-amz-request-id"),
+				HostID:     resp.Header.Get("x-amz-id-2"),
+				Region:     resp.Header.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+	// Parse Last-Modified, which has the http time format.
+	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+	if err != nil {
+		return ObjectInfo{}, ErrorResponse{
+			Code:       "InternalError",
+			Message:    "Last-Modified time format is invalid. " + reportIssue,
+			BucketName: bucketName,
+			Key:        objectName,
+			RequestID:  resp.Header.Get("x-amz-request-id"),
+			HostID:     resp.Header.Get("x-amz-id-2"),
+			Region:     resp.Header.Get("x-amz-bucket-region"),
+		}
+	}
+	// Fetch content type if any present.
+	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	// Extract only the relevant header keys describing the object.
+	// The function above filters out a standard set of keys
+	// which are not part of object metadata.
+	metadata := extractObjMetadata(resp.Header)
+
+	// Save object metadata info.
+	return ObjectInfo{
+		ETag:         md5sum,
+		Key:          objectName,
+		Size:         size,
+		LastModified: date,
+		ContentType:  contentType,
+		Metadata:     metadata,
+	}, nil
+}
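A usage sketch for StatObject (illustrative names):

    info, err := client.StatObject("my-bucket", "photos/cat.jpg")
    if err != nil {
        log.Fatalln(err)
    }
    log.Println(info.Key, info.Size, info.ContentType, info.LastModified)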
diff --git a/src/server/vendor/github.com/minio/minio-go/api.go b/src/server/vendor/github.com/minio/minio-go/api.go
new file mode 100644
index 00000000000..1231ddf403a
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/api.go
@@ -0,0 +1,738 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+	/// Standard options.
+
+	// Parsed endpoint url provided by the user.
+	endpointURL url.URL
+
+	// AccessKeyID required for authorized requests.
+	accessKeyID string
+	// SecretAccessKey required for authorized requests.
+	secretAccessKey string
+	// Choose a signature type if necessary.
+	signature SignatureType
+	// Set to 'true' if Client has no access and secret keys.
+	anonymous bool
+
+	// User supplied.
+	appInfo struct {
+		appName    string
+		appVersion string
+	}
+
+	// Indicates whether we are using https or not
+	secure bool
+
+	// Needs allocation.
+	httpClient     *http.Client
+	bucketLocCache *bucketLocationCache
+
+	// Advanced functionality.
+	isTraceEnabled bool
+	traceOutput    io.Writer
+
+	// S3 specific accelerated endpoint.
+	s3AccelerateEndpoint string
+
+	// Random seed.
+	random *rand.Rand
+}
+
+// Global constants.
+const (
+	libraryName    = "minio-go"
+	libraryVersion = "2.0.3"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+//	Minio (OS; ARCH) LIB/VER APP/VER
+const (
+	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version
+// '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+	if err != nil {
+		return nil, err
+	}
+	// Set to use signature version '2'.
+	clnt.signature = SignatureV2
+	return clnt, nil
+}
+
+// NewV4 - instantiate minio client with Amazon S3 signature version
+// '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+	if err != nil {
+		return nil, err
+	}
+	// Set to use signature version '4'.
+	clnt.signature = SignatureV4
+	return clnt, nil
+}
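Constructing a client with an explicit signature version (illustrative endpoint and credentials; New, below, picks the version automatically for known endpoints):

    client, err := minio.NewV4("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatalln(err)
    }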
+
+// New - instantiate minio client Client, adds automatic verification
+// of signature.
+func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+	if err != nil {
+		return nil, err
+	}
+	// Google cloud storage should be set to signature V2, force it if not.
+	if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
+		clnt.signature = SignatureV2
+	}
+	// If Amazon S3, set to signature v4.
+	if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
+		clnt.signature = SignatureV4
+	}
+	return clnt, nil
+}
+
+// lockedRandSource provides a protected rand source; implements the rand.Source interface.
+type lockedRandSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an
+// int64.
+func (r *lockedRandSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+	if len(via) == 0 {
+		return nil
+	}
+	for key, val := range via[0].Header {
+		req.Header[key] = val
+	}
+	return nil
+}
+
+func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	// Construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, secure)
+	if err != nil {
+		return nil, err
+	}
+
+	// Instantiate new Client.
+	clnt := new(Client)
+	clnt.accessKeyID = accessKeyID
+	clnt.secretAccessKey = secretAccessKey
+	if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
+		clnt.anonymous = true
+	}
+
+	// Remember whether we are using https or not
+	clnt.secure = secure
+
+	// Save endpoint URL, user agent for future uses.
+	clnt.endpointURL = *endpointURL
+
+	// Instantiate http client and bucket location cache.
+	clnt.httpClient = &http.Client{
+		Transport:     http.DefaultTransport,
+		CheckRedirect: redirectHeaders,
+	}
+
+	// Instantiate bucket location cache.
+	clnt.bucketLocCache = newBucketLocationCache()
+
+	// Introduce a new locked random seed.
+	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+	// Return.
+	return clnt, nil
+}
+
+// SetAppInfo - add application details to the user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+	// If app name and version are not set, we do not set a new user agent.
+	if appName != "" && appVersion != "" {
+		c.appInfo = struct {
+			appName    string
+			appVersion string
+		}{}
+		c.appInfo.appName = appName
+		c.appInfo.appVersion = appVersion
+	}
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+	// Set this to override default transport
+	// ``http.DefaultTransport``.
+	//
+	// This transport is usually needed for debugging OR to add your
+	// own custom TLS certificates on the client transport, for custom
+	// CA's and certs which are not part of standard certificate
+	// authority follow this example :-
+	//
+	//	tr := &http.Transport{
+	//		TLSClientConfig:    &tls.Config{RootCAs: pool},
+	//		DisableCompression: true,
+	//	}
+	//	api.SetTransport(tr)
+	//
+	if c.httpClient != nil {
+		c.httpClient.Transport = customHTTPTransport
+	}
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+	// If outputStream is nil then default to os.Stdout.
+	if outputStream == nil {
+		outputStream = os.Stdout
+	}
+	// Sets a new output stream.
+	c.traceOutput = outputStream
+
+	// Enable tracing.
+	c.isTraceEnabled = true
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+	// Disable tracing.
+	c.isTraceEnabled = false
+}
+
+// SetS3TransferAccelerate - turns the s3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+	if s3utils.IsAmazonEndpoint(c.endpointURL) {
+		c.s3AccelerateEndpoint = accelerateEndpoint
+	}
+}
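Enabling the redacted HTTP trace while debugging (illustrative):

    client.TraceOn(os.Stderr) // passing nil would default to os.Stdout
    defer client.TraceOff()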
+
+// requestMetadata - is a container for all the values to make a request.
+type requestMetadata struct {
+	// If set newRequest presigns the URL.
+	presignURL bool
+
+	// User supplied.
+	bucketName   string
+	objectName   string
+	queryValues  url.Values
+	customHeader http.Header
+	expires      int64
+
+	// Generated by our internal code.
+	bucketLocation     string
+	contentBody        io.Reader
+	contentLength      int64
+	contentSHA256Bytes []byte
+	contentMD5Bytes    []byte
+}
+
+// regCred matches the credential string in an HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches the signature string in an HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Filter out the signature value from the Authorization header.
+func (c Client) filterSignature(req *http.Request) {
+	// For anonymous requests, no need to filter.
+	if c.anonymous {
+		return
+	}
+	// Handle if Signature V2.
+	if c.signature.isV2() {
+		// Set a temporary redacted auth
+		req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
+		return
+	}
+
+	/// Signature V4 authorization header.
+
+	// Save the original auth.
+	origAuth := req.Header.Get("Authorization")
+	// Strip out accessKeyID from:
+	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+	// Strip out 256-bit signature from: Signature=<256-bit signature>
+	newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+
+	// Set a temporary redacted auth
+	req.Header.Set("Authorization", newAuth)
+	return
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+	// Starts http dump.
+	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Filter out the Signature field from the Authorization header.
+	c.filterSignature(req)
+
+	// Only display request header.
+	reqTrace, err := httputil.DumpRequestOut(req, false)
+	if err != nil {
+		return err
+	}
+
+	// Write request to trace output.
+	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+	if err != nil {
+		return err
+	}
+
+	// Only display response header.
+	var respTrace []byte
+
+	// For errors we make sure to dump the response body as well.
+	if resp.StatusCode != http.StatusOK &&
+		resp.StatusCode != http.StatusPartialContent &&
+		resp.StatusCode != http.StatusNoContent {
+		respTrace, err = httputil.DumpResponse(resp, true)
+		if err != nil {
+			return err
+		}
+	} else {
+		// WORKAROUND for https://github.com/golang/go/issues/13942.
+		// httputil.DumpResponse does not print response headers for
+		// all successful calls which have response ContentLength set
+		// to zero. Keep this workaround until the above bug is fixed.
+		if resp.ContentLength == 0 {
+			var buffer bytes.Buffer
+			if err = resp.Header.Write(&buffer); err != nil {
+				return err
+			}
+			respTrace = buffer.Bytes()
+			respTrace = append(respTrace, []byte("\r\n")...)
+		} else {
+			respTrace, err = httputil.DumpResponse(resp, false)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	// Write response to trace output.
+	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+	if err != nil {
+		return err
+	}
+
+	// Ends the http dump.
+	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Returns success.
+	return nil
+}
+
+// do - execute http request.
+func (c Client) do(req *http.Request) (*http.Response, error) {
+	var resp *http.Response
+	var err error
+	// Do the request in a loop in case http 307 is met, since golang still doesn't
+	// handle this situation properly (https://github.com/golang/go/issues/7912)
+	for {
+		resp, err = c.httpClient.Do(req)
+		if err != nil {
+			// Handle this specifically for now until future Golang
+			// versions fix this issue properly.
+			urlErr, ok := err.(*url.Error)
+			if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+				return nil, &url.Error{
+					Op:  urlErr.Op,
+					URL: urlErr.URL,
+					Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+				}
+			}
+			return nil, err
+		}
+		// Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
+		if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
+			newURL, err := url.Parse(resp.Header.Get("Location"))
+			if err != nil {
+				break
+			}
+			req.URL = newURL
+		} else {
+			break
+		}
+	}
+
+	// Response cannot be nil here; report an issue if that is the case.
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return nil, ErrInvalidArgument(msg)
+	}
+
+	// If trace is enabled, dump the http request and response.
+	if c.isTraceEnabled {
+		err = c.dumpHTTP(req, resp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return resp, nil
+}
+
+// List of success status.
+var successStatus = []int{
+	http.StatusOK,
+	http.StatusNoContent,
+	http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error, up to MaxRetry attempts, using a standard
+// exponential backoff algorithm with jitter.
+func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+	var isRetryable bool     // Indicates if request can be retried.
+	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+	if metadata.contentBody != nil {
+		// If the body is seekable then it is retryable.
+		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+	}
+
+	// Create a done channel to control the retry timer goroutine.
+	doneCh := make(chan struct{}, 1)
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// The blank identifier is kept here on purpose since 'range' without
+	// blank identifiers is only supported since go1.4
+	// https://golang.org/doc/go1.4#forrange.
+	for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
+		// Retry executes the following function body if the request has an
+		// error until maxRetries have been exhausted; retry attempts are
+		// performed after waiting for an exponentially growing period of time.
+		if isRetryable {
+			// Seek back to beginning for each attempt.
+			if _, err = bodySeeker.Seek(0, 0); err != nil {
+				// If seek failed, no need to retry.
+				return nil, err
+			}
+		}
+
+		// Instantiate a new request.
+		var req *http.Request
+		req, err = c.newRequest(method, metadata)
+		if err != nil {
+			errResponse := ToErrorResponse(err)
+			if isS3CodeRetryable(errResponse.Code) {
+				continue // Retry.
+			}
+			return nil, err
+		}
+
+		// Initiate the request.
+		res, err = c.do(req)
+		if err != nil {
+			// For supported network errors verify.
+			if isNetErrorRetryable(err) {
+				continue // Retry.
+			}
+			// For other errors, return here, no need to retry.
+			return nil, err
+		}
+
+		// For any known successful http status, return quickly.
+		for _, httpStatus := range successStatus {
+			if httpStatus == res.StatusCode {
+				return res, nil
+			}
+		}
+
+		// Read the body to be saved later.
+		errBodyBytes, err := ioutil.ReadAll(res.Body)
+		// res.Body should be closed
+		closeResponse(res)
+		if err != nil {
+			return nil, err
+		}
+		// Save the body.
+		errBodySeeker := bytes.NewReader(errBodyBytes)
+		res.Body = ioutil.NopCloser(errBodySeeker)
+
+		// For errors verify if it's retryable, otherwise fail quickly.
+		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+		// If the bucket region is set in the error response, we can retry
+		// the request with the new region.
+		if errResponse.Region != "" {
+			c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+			continue // Retry.
+		}
+
+		// Verify if the error response code is retryable.
+		if isS3CodeRetryable(errResponse.Code) {
+			continue // Retry.
+		}
+
+		// Verify if the http status code is retryable.
+		if isHTTPStatusRetryable(res.StatusCode) {
+			continue // Retry.
+		}
+
+		// Save the body back again.
+		errBodySeeker.Seek(0, 0) // Seek back to starting point.
+		res.Body = ioutil.NopCloser(errBodySeeker)
+
+		// For all other cases break out of the retry loop.
+		break
+	}
+	return res, err
+}
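The rewind-then-retry pattern above can be summarized by this generic sketch (illustrative only; retryDo and its fn callback are hypothetical stand-ins, not functions from this library):

    // retryDo retries fn, rewinding the payload and backing off between attempts.
    func retryDo(body io.ReadSeeker, maxRetries int, fn func() error) error {
        var err error
        for attempt := 0; attempt < maxRetries; attempt++ {
            body.Seek(0, 0) // rewind the payload before every attempt
            if err = fn(); err == nil {
                return nil
            }
            // Exponentially growing delay, capped at 30s like the code above.
            delay := time.Duration(1<<uint(attempt)) * time.Second
            if delay > 30*time.Second {
                delay = 30 * time.Second
            }
            time.Sleep(delay)
        }
        return err
    }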
+
+// newRequest - instantiate a new HTTP request for a given method.
+func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
+	// If no method is supplied default to 'POST'.
+	if method == "" {
+		method = "POST"
+	}
+
+	// Default all requests to "us-east-1" or "cn-north-1" (china region)
+	location := "us-east-1"
+	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
+		// For china specifically we need to set everything to
+		// cn-north-1 for now, there is no easier way until AWS S3
+		// provides a cleaner compatible API across "us-east-1" and
+		// China region.
+		location = "cn-north-1"
+	}
+
+	// Gather location only if bucketName is present.
+	if metadata.bucketName != "" {
+		location, err = c.getBucketLocation(metadata.bucketName)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Save location.
+	metadata.bucketLocation = location
+
+	// Construct a new target URL.
+	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize a new HTTP request for the method.
+	req, err = http.NewRequest(method, targetURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Generate presign url if needed, return right here.
+	if metadata.expires != 0 && metadata.presignURL {
+		if c.anonymous {
+			return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
+		}
+		if c.signature.isV2() {
+			// Presign URL with signature v2.
+			req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+		} else {
+			// Presign URL with signature v4.
+			req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+		}
+		return req, nil
+	}
+
+	// Set content body if available.
+	if metadata.contentBody != nil {
+		req.Body = ioutil.NopCloser(metadata.contentBody)
+	}
+
+	// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
+	// Skip setting the 'expect' header for Google Cloud Storage, there
+	// are some known issues - https://github.com/restic/restic/issues/520
+	if !s3utils.IsGoogleEndpoint(c.endpointURL) && c.s3AccelerateEndpoint == "" {
+		// Set the 'Expect' header for the request.
+		req.Header.Set("Expect", "100-continue")
+	}
+
+	// Set the 'User-Agent' header for the request.
+	c.setUserAgent(req)
+
+	// Set all headers.
+	for k, v := range metadata.customHeader {
+		req.Header.Set(k, v[0])
+	}
+
+	// Set the incoming content-length.
+	if metadata.contentLength > 0 {
+		req.ContentLength = metadata.contentLength
+	}
+
+	// Set the sha256 sum only for non-anonymous credentials.
+	if !c.anonymous {
+		// Set the sha256 sum for signature calculation only with
+		// signature version '4'.
+		if c.signature.isV4() {
+			shaHeader := unsignedPayload
+			if !c.secure {
+				if metadata.contentSHA256Bytes == nil {
+					shaHeader = hex.EncodeToString(sum256([]byte{}))
+				} else {
+					shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+				}
+			}
+			req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+		}
+	}
+
+	// Set md5Sum for content protection.
+	if metadata.contentMD5Bytes != nil {
+		req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+	}
+
+	// Sign the request for all authenticated requests.
+	if !c.anonymous {
+		if c.signature.isV2() {
+			// Add signature version '2' authorization header.
+			req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
+		} else if c.signature.isV4() {
+			// Add signature version '4' authorization header.
+			req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
+		}
+	}
+
+	// Return request.
+	return req, nil
+}
+
+// setUserAgent - set the 'User-Agent' header.
+func (c Client) setUserAgent(req *http.Request) {
+	req.Header.Set("User-Agent", libraryUserAgent)
+	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
+		req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
+	}
+}
+
+// makeTargetURL makes a new target url.
+func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
+	host := c.endpointURL.Host
+	// For the Amazon S3 endpoint, try to fetch the location based endpoint.
+	if s3utils.IsAmazonEndpoint(c.endpointURL) {
+		if c.s3AccelerateEndpoint != "" && bucketName != "" {
+			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+			// Disable transfer acceleration for non-compliant bucket names.
+			if strings.Contains(bucketName, ".") {
+				return nil, ErrTransferAccelerationBucket(bucketName)
+			}
+			// If transfer acceleration is requested set new host.
+			// For more details about enabling transfer acceleration read here.
+			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+			host = c.s3AccelerateEndpoint
+		} else {
+			// Fetch new host based on the bucket location.
+			host = getS3Endpoint(bucketLocation)
+		}
+	}
+
+	// Save scheme.
+	scheme := c.endpointURL.Scheme
+
+	urlStr := scheme + "://" + host + "/"
+	// Make URL only if bucketName is available, otherwise use the
+	// endpoint URL.
+	if bucketName != "" {
+		// Save whether the target url will have buckets which support virtual host.
+ isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName) + + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } + } + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + return u, nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/appveyor.yml b/src/server/vendor/github.com/minio/minio-go/appveyor.yml new file mode 100644 index 00000000000..a5dc2b226b5 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/appveyor.yml @@ -0,0 +1,37 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\minio\minio-go + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -u github.com/golang/lint/golint + - go get -u github.com/remyoudompheng/go-misc/deadcode + - go get -u github.com/gordonklaus/ineffassign + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go vet ./... + - gofmt -s -l . + - golint github.com/minio/minio-go... + - deadcode + - ineffassign . + - go test -short -v + - go test -short -race -v + +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/src/server/vendor/github.com/minio/minio-go/bucket-cache.go b/src/server/vendor/github.com/minio/minio-go/bucket-cache.go new file mode 100644 index 00000000000..c35e26b7c67 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/bucket-cache.go @@ -0,0 +1,197 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/hex" + "net/http" + "net/url" + "path" + "strings" + "sync" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. 
+func newBucketLocationCache() *bucketLocationCache {
+	return &bucketLocationCache{
+		items: make(map[string]string),
+	}
+}
+
+// Get - Returns the value of a given key if it exists.
+func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
+	r.RLock()
+	defer r.RUnlock()
+	location, ok = r.items[bucketName]
+	return
+}
+
+// Set - Will persist a value into the cache.
+func (r *bucketLocationCache) Set(bucketName string, location string) {
+	r.Lock()
+	defer r.Unlock()
+	r.items[bucketName] = location
+}
+
+// Delete - Deletes a bucket name from the cache.
+func (r *bucketLocationCache) Delete(bucketName string) {
+	r.Lock()
+	defer r.Unlock()
+	delete(r.items, bucketName)
+}
+
+// GetBucketLocation - gets the location for the bucket name from the location cache;
+// if not cached, fetches it freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	return c.getBucketLocation(bucketName)
+}
+
+// getBucketLocation - gets the location for the bucketName from the location map cache;
+// if not cached, fetches it freshly by making a new request.
+func (c Client) getBucketLocation(bucketName string) (string, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	if location, ok := c.bucketLocCache.Get(bucketName); ok {
+		return location, nil
+	}
+
+	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
+		// For china specifically we need to set everything to
+		// cn-north-1 for now, there is no easier way until AWS S3
+		// provides a cleaner compatible API across "us-east-1" and
+		// China region.
+		return "cn-north-1", nil
+	}
+
+	// Initialize a new request.
+	req, err := c.getBucketLocationRequest(bucketName)
+	if err != nil {
+		return "", err
+	}
+
+	// Initiate the request.
+	resp, err := c.do(req)
+	defer closeResponse(resp)
+	if err != nil {
+		return "", err
+	}
+	location, err := processBucketLocationResponse(resp, bucketName)
+	if err != nil {
+		return "", err
+	}
+	c.bucketLocCache.Set(bucketName, location)
+	return location, nil
+}
+
+// processBucketLocationResponse - processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			err = httpRespToErrorResponse(resp, bucketName, "")
+			errResp := ToErrorResponse(err)
+			// For an access denied error, it could be an anonymous
+			// request. Move forward and let the top level callers
+			// succeed if possible based on their policy.
+			if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+				return "us-east-1", nil
+			}
+			return "", err
+		}
+	}
+
+	// Extract location.
+	var locationConstraint string
+	err = xmlDecoder(resp.Body, &locationConstraint)
+	if err != nil {
+		return "", err
+	}
+
+	location := locationConstraint
+	// An empty location defaults to 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+	}
+
+	// Location can be 'EU'; convert it to the meaningful 'eu-west-1'.
+	if location == "EU" {
+		location = "eu-west-1"
+	}
+
+	// Return the location; the caller (getBucketLocation) saves it into the cache.
+	return location, nil
+}
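A usage sketch for the exported GetBucketLocation (illustrative):

    loc, err := client.GetBucketLocation("my-bucket")
    if err != nil {
        log.Fatalln(err)
    }
    log.Println("bucket region:", loc) // e.g. "us-east-1"; cached for later requests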
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
+	// Set location query.
+	urlValues := make(url.Values)
+	urlValues.Set("location", "")
+
+	// Set get bucket location always as path style.
+	targetURL := c.endpointURL
+	targetURL.Path = path.Join(bucketName, "") + "/"
+	targetURL.RawQuery = urlValues.Encode()
+
+	// Get a new HTTP request for the method.
+	req, err := http.NewRequest("GET", targetURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set UserAgent for the request.
+	c.setUserAgent(req)
+
+	// Set sha256 sum for signature calculation only with signature version '4'.
+	if c.signature.isV4() {
+		var contentSha256 string
+		if c.secure {
+			contentSha256 = unsignedPayload
+		} else {
+			contentSha256 = hex.EncodeToString(sum256([]byte{}))
+		}
+		req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+	}
+
+	// Sign the request.
+	if c.signature.isV4() {
+		req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+	} else if c.signature.isV2() {
+		req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
+	}
+	return req, nil
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/bucket-notification.go b/src/server/vendor/github.com/minio/minio-go/bucket-notification.go
new file mode 100644
index 00000000000..4f60f1c8b34
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -0,0 +1,228 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+	"reflect"
+)
+
+// NotificationEventType is an S3 notification event associated to the bucket notification configuration
+type NotificationEventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+	ObjectCreatedAll                     NotificationEventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                                          = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                                          = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectRemovedAll                                           = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                                        = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated                           = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject                          = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+	Name  string `xml:"Name"`
+	Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+	S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service;
+// an ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+	Partition string
+	Service   string
+	Region    string
+	AccountID string
+	Resource  string
+}
+
+// NewArn creates a new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+	return Arn{Partition: partition,
+		Service:   service,
+		Region:    region,
+		AccountID: accountID,
+		Resource:  resource}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
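A quick sketch of building and printing an ARN (illustrative account and queue name):

    arn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "events-queue")
    fmt.Println(arn.String())
    // arn:aws:sqs:us-east-1:123456789012:events-queue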
+
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+	ID     string                  `xml:"Id,omitempty"`
+	Arn    Arn                     `xml:"-"`
+	Events []NotificationEventType `xml:"Event"`
+	Filter *Filter                 `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+	return NotificationConfig{Arn: arn}
+}
+
+// AddEvents adds one or more events to the current notification config
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+	t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix configuration to the current notification config
+func (t *NotificationConfig) AddFilterSuffix(suffix string) {
+	if t.Filter == nil {
+		t.Filter = &Filter{}
+	}
+	newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+	// Replace any existing suffix rule, otherwise add it to the list
+	for index := range t.Filter.S3Key.FilterRules {
+		if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+			t.Filter.S3Key.FilterRules[index] = newFilterRule
+			return
+		}
+	}
+	t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix configuration to the current notification config
+func (t *NotificationConfig) AddFilterPrefix(prefix string) {
+	if t.Filter == nil {
+		t.Filter = &Filter{}
+	}
+	newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+	// Replace any existing prefix rule, otherwise add it to the list
+	for index := range t.Filter.S3Key.FilterRules {
+		if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+			t.Filter.S3Key.FilterRules[index] = newFilterRule
+			return
+		}
+	}
+	t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+	NotificationConfig
+	Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+	NotificationConfig
+	Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+	NotificationConfig
+	Lambda string `xml:"CloudFunction"`
+}
+
+// BucketNotification - the struct that represents the whole XML to be sent to the web service
+type BucketNotification struct {
+	XMLName       xml.Name       `xml:"NotificationConfiguration"`
+	LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+	TopicConfigs  []TopicConfig  `xml:"TopicConfiguration"`
+	QueueConfigs  []QueueConfig  `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
+	newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+	for _, n := range b.TopicConfigs {
+		if reflect.DeepEqual(n, newTopicConfig) {
+			// Avoid adding a duplicated entry
+			return
+		}
+	}
+	b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
+	newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+	for _, n := range b.QueueConfigs {
+		if reflect.DeepEqual(n, newQueueConfig) {
+			// Avoid adding a duplicated entry
+			return
+		}
+	}
+	b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
+	newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+	for _, n := range b.LambdaConfigs {
+		if reflect.DeepEqual(n, newLambdaConfig) {
+			// Avoid adding a duplicated entry
+			return
+		}
+	}
+	b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+}
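Putting the pieces together, a bucket notification targeting an SQS queue can be assembled like this (illustrative; applying it to a bucket goes through the client's notification API, which is outside this excerpt):

    queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "events-queue")
    cfg := minio.NewNotificationConfig(queueArn)
    cfg.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
    cfg.AddFilterPrefix("photos/")
    cfg.AddFilterSuffix(".jpg")

    var bn minio.BucketNotification
    bn.AddQueue(cfg)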
[]TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/constants.go b/src/server/vendor/github.com/minio/minio-go/constants.go
new file mode 100644
index 00000000000..057c3eef4d7
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/constants.go
@@ -0,0 +1,52 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+/// Multipart upload defaults.
+
+// minPartSize - minimum part size 64MiB per object after which
+// putObject behaves internally as multipart.
+const minPartSize = 1024 * 1024 * 64
+
+// maxPartsCount - maximum number of parts for a single multipart session.
+const maxPartsCount = 10000
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload
+// operation.
+const maxPartSize = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
+// operation.
+const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// optimalReadBufferSize - optimal buffer 5MiB used for reading
+// through Read operation.
+const optimalReadBufferSize = 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
diff --git a/src/server/vendor/github.com/minio/minio-go/copy-conditions.go b/src/server/vendor/github.com/minio/minio-go/copy-conditions.go
new file mode 100644
index 00000000000..5dcdfaef067
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/copy-conditions.go
@@ -0,0 +1,97 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "time" +) + +// copyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html +// +// Example: +// +// copyCondition { +// key: "x-amz-copy-if-modified-since", +// value: "Tue, 15 Nov 1994 12:45:26 GMT", +// } +// +type copyCondition struct { + key string + value string +} + +// CopyConditions - copy conditions. +type CopyConditions struct { + conditions []copyCondition +} + +// NewCopyConditions - Instantiate new list of conditions. +func NewCopyConditions() CopyConditions { + return CopyConditions{ + conditions: make([]copyCondition, 0), + } +} + +// SetMatchETag - set match etag. +func (c *CopyConditions) SetMatchETag(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-match", + value: etag, + }) + return nil +} + +// SetMatchETagExcept - set match etag except. +func (c *CopyConditions) SetMatchETagExcept(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-none-match", + value: etag, + }) + return nil +} + +// SetUnmodified - set unmodified time since. +func (c *CopyConditions) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-unmodified-since", + value: modTime.Format(http.TimeFormat), + }) + return nil +} + +// SetModified - set modified time since. +func (c *CopyConditions) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-modified-since", + value: modTime.Format(http.TimeFormat), + }) + return nil +} diff --git a/src/server/vendor/github.com/minio/minio-go/hook-reader.go b/src/server/vendor/github.com/minio/minio-go/hook-reader.go new file mode 100644 index 00000000000..bc9ece049d4 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/hook-reader.go @@ -0,0 +1,70 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "io" + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. 
Second reader is appropriately
+// notified about the exact number of bytes read from the primary
+// source on each Read operation.
+type hookReader struct {
+ source io.Reader
+ hook io.Reader
+}
+
+// Seek implements io.Seeker. It seeks the source if the source
+// implements io.Seeker, otherwise it seeks the hook if the hook does;
+// when neither is seekable it is a no-op.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+ // If the source has an embedded Seeker, use it.
+ sourceSeeker, ok := hr.source.(io.Seeker)
+ if ok {
+ return sourceSeeker.Seek(offset, whence)
+ }
+ // Otherwise, if the hook has an embedded Seeker, use it.
+ hookSeeker, ok := hr.hook.(io.Seeker)
+ if ok {
+ return hookSeeker.Seek(offset, whence)
+ }
+ return n, nil
+}
+
+// Read implements io.Reader. It always reads from the source; the 'n'
+// bytes read are then reported to the hook. Returns an error for all
+// non-io.EOF conditions.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+ n, err = hr.source.Read(b)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+ // Progress the hook with the total read bytes from the source.
+ if _, herr := hr.hook.Read(b[:n]); herr != nil {
+ if herr != io.EOF {
+ return n, herr
+ }
+ }
+ return n, err
+}
+
+// newHook returns an io.Reader that wraps the source in a hookReader,
+// which reports the data read from the source to the hook. If hook is
+// nil, the source is returned unchanged.
+func newHook(source, hook io.Reader) io.Reader {
+ if hook == nil {
+ return source
+ }
+ return &hookReader{source, hook}
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
new file mode 100644
index 00000000000..078bcd1db68
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
@@ -0,0 +1,115 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import "github.com/minio/minio-go/pkg/set"
+
+// ConditionKeyMap - map of policy condition key and value.
+type ConditionKeyMap map[string]set.StringSet
+
+// Add - adds key and value. The value is appended if the key already exists.
+func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ ckm[key] = v.Union(value)
+ } else {
+ ckm[key] = set.CopyStringSet(value)
+ }
+}
+
+// Remove - removes the value from the given key. If the key's set is empty after removal, the key is also removed.
+func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ if value != nil {
+ ckm[key] = v.Difference(value)
+ }
+
+ if ckm[key].IsEmpty() {
+ delete(ckm, key)
+ }
+ }
+}
+
+// RemoveKey - removes key and its value.
+func (ckm ConditionKeyMap) RemoveKey(key string) {
+ if _, ok := ckm[key]; ok {
+ delete(ckm, key)
+ }
+}
+
+// CopyConditionKeyMap - returns new copy of given ConditionKeyMap.
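+// Both keys and their string-set values are deep-copied, so mutating the
+// copy never affects the original. For example (hypothetical values):
+//
+// orig := ConditionKeyMap{"s3:prefix": set.CreateStringSet("docs/")}
+// dup := CopyConditionKeyMap(orig)
+// dup.Add("s3:prefix", set.CreateStringSet("images/")) // orig keeps only "docs/"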
+func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap { + out := make(ConditionKeyMap) + + for k, v := range condKeyMap { + out[k] = set.CopyStringSet(v) + } + + return out +} + +// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap. +func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap { + out := CopyConditionKeyMap(condKeyMap1) + + for k, v := range condKeyMap2 { + if ev, ok := out[k]; ok { + out[k] = ev.Union(v) + } else { + out[k] = set.CopyStringSet(v) + } + } + + return out +} + +// ConditionMap - map of condition and conditional values. +type ConditionMap map[string]ConditionKeyMap + +// Add - adds condition key and condition value. The value is appended if key already exists. +func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) { + if v, ok := cond[condKey]; ok { + cond[condKey] = mergeConditionKeyMap(v, condKeyMap) + } else { + cond[condKey] = CopyConditionKeyMap(condKeyMap) + } +} + +// Remove - removes condition key and its value. +func (cond ConditionMap) Remove(condKey string) { + if _, ok := cond[condKey]; ok { + delete(cond, condKey) + } +} + +// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap. +func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap { + out := make(ConditionMap) + + for k, v := range condMap1 { + out[k] = CopyConditionKeyMap(v) + } + + for k, v := range condMap2 { + if ev, ok := out[k]; ok { + out[k] = mergeConditionKeyMap(ev, v) + } else { + out[k] = CopyConditionKeyMap(v) + } + } + + return out +} diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go new file mode 100644 index 00000000000..cbb889d8d32 --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -0,0 +1,634 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "reflect" + "strings" + + "github.com/minio/minio-go/pkg/set" +) + +// BucketPolicy - Bucket level policy. +type BucketPolicy string + +// Different types of Policies currently supported for buckets. +const ( + BucketPolicyNone BucketPolicy = "none" + BucketPolicyReadOnly = "readonly" + BucketPolicyReadWrite = "readwrite" + BucketPolicyWriteOnly = "writeonly" +) + +// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise. +func (p BucketPolicy) IsValidBucketPolicy() bool { + switch p { + case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly: + return true + } + return false +} + +// Resource prefix for all aws resources. +const awsResourcePrefix = "arn:aws:s3:::" + +// Common bucket actions for both read and write policies. 
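+// s3:GetBucketLocation is required for any kind of access, so the common
+// actions below are attached to every policy this package generates,
+// regardless of its read/write mode.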
+var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation") + +// Read only bucket actions. +var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket") + +// Write only bucket actions. +var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads") + +// Read only object actions. +var readOnlyObjectActions = set.CreateStringSet("s3:GetObject") + +// Write only object actions. +var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject") + +// Read and write object actions. +var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions) + +// All valid bucket and object actions. +var validActions = commonBucketActions. + Union(readOnlyBucketActions). + Union(writeOnlyBucketActions). + Union(readOnlyObjectActions). + Union(writeOnlyObjectActions) + +var startsWithFunc = func(resource string, resourcePrefix string) bool { + return strings.HasPrefix(resource, resourcePrefix) +} + +// User - canonical users list. +type User struct { + AWS set.StringSet `json:"AWS,omitempty"` + CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"` +} + +// Statement - minio policy statement +type Statement struct { + Actions set.StringSet `json:"Action"` + Conditions ConditionMap `json:"Condition,omitempty"` + Effect string + Principal User `json:"Principal"` + Resources set.StringSet `json:"Resource"` + Sid string +} + +// BucketAccessPolicy - minio policy collection +type BucketAccessPolicy struct { + Version string // date in YYYY-MM-DD format + Statements []Statement `json:"Statement"` +} + +// isValidStatement - returns whether given statement is valid to process for given bucket name. +func isValidStatement(statement Statement, bucketName string) bool { + if statement.Actions.Intersection(validActions).IsEmpty() { + return false + } + + if statement.Effect != "Allow" { + return false + } + + if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") { + return false + } + + bucketResource := awsResourcePrefix + bucketName + if statement.Resources.Contains(bucketResource) { + return true + } + + if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() { + return false + } + + return true +} + +// Returns new statements with bucket actions for given policy. 
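+// A statement carrying the common bucket actions is always emitted; read
+// policies additionally grant s3:ListBucket (conditioned on the prefix
+// when one is given) and write policies grant s3:ListBucketMultipartUploads.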
+func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
+
+ statement := Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+
+ if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ if prefix != "" {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
+ condMap := make(ConditionMap)
+ condMap.Add("StringEquals", condKeyMap)
+ statement.Conditions = condMap
+ }
+ statements = append(statements, statement)
+ }
+
+ if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+ }
+
+ return statements
+}
+
+// Returns new statements containing object actions for given policy.
+func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ statement := Statement{
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
+ Sid: "",
+ }
+
+ if policy == BucketPolicyReadOnly {
+ statement.Actions = readOnlyObjectActions
+ } else if policy == BucketPolicyWriteOnly {
+ statement.Actions = writeOnlyObjectActions
+ } else if policy == BucketPolicyReadWrite {
+ statement.Actions = readWriteObjectActions
+ }
+
+ statements = append(statements, statement)
+ return statements
+}
+
+// Returns new statements for given policy, bucket and prefix.
+func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ ns := newBucketStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ ns = newObjectStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ return statements
+}
+
+// Returns whether the read-only and write-only object actions in the given
+// statements are in use by object resources other than the given prefix.
+func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ for _, s := range statements {
+ if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
+ if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnlyInUse = true
+ }
+
+ if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnlyInUse = true
+ }
+ }
+ if readOnlyInUse && writeOnlyInUse {
+ break
+ }
+ }
+
+ return readOnlyInUse, writeOnlyInUse
+}
+
+// Removes object actions in given statement.
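+// If the statement also covers resources other than the given object
+// resource, only that resource is dropped from the statement; otherwise
+// the read-only and write-only object actions are stripped from it.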
+func removeObjectActions(statement Statement, objectResource string) Statement { + if statement.Conditions == nil { + if len(statement.Resources) > 1 { + statement.Resources.Remove(objectResource) + } else { + statement.Actions = statement.Actions.Difference(readOnlyObjectActions) + statement.Actions = statement.Actions.Difference(writeOnlyObjectActions) + } + } + + return statement +} + +// Removes bucket actions for given policy in given statement. +func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement { + removeReadOnly := func() { + if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { + return + } + + if statement.Conditions == nil { + statement.Actions = statement.Actions.Difference(readOnlyBucketActions) + return + } + + if prefix != "" { + stringEqualsValue := statement.Conditions["StringEquals"] + values := set.NewStringSet() + if stringEqualsValue != nil { + values = stringEqualsValue["s3:prefix"] + if values == nil { + values = set.NewStringSet() + } + } + + values.Remove(prefix) + + if stringEqualsValue != nil { + if values.IsEmpty() { + delete(stringEqualsValue, "s3:prefix") + } + if len(stringEqualsValue) == 0 { + delete(statement.Conditions, "StringEquals") + } + } + + if len(statement.Conditions) == 0 { + statement.Conditions = nil + statement.Actions = statement.Actions.Difference(readOnlyBucketActions) + } + } + } + + removeWriteOnly := func() { + if statement.Conditions == nil { + statement.Actions = statement.Actions.Difference(writeOnlyBucketActions) + } + } + + if len(statement.Resources) > 1 { + statement.Resources.Remove(bucketResource) + } else { + if !readOnlyInUse { + removeReadOnly() + } + + if !writeOnlyInUse { + removeWriteOnly() + } + } + + return statement +} + +// Returns statements containing removed actions/statements for given +// policy, bucket name and prefix. 
+func removeStatements(statements []Statement, bucketName string, prefix string) []Statement { + bucketResource := awsResourcePrefix + bucketName + objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" + readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix) + + out := []Statement{} + readOnlyBucketStatements := []Statement{} + s3PrefixValues := set.NewStringSet() + + for _, statement := range statements { + if !isValidStatement(statement, bucketName) { + out = append(out, statement) + continue + } + + if statement.Resources.Contains(bucketResource) { + if statement.Conditions != nil { + statement = removeBucketActions(statement, prefix, bucketResource, false, false) + } else { + statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse) + } + } else if statement.Resources.Contains(objectResource) { + statement = removeObjectActions(statement, objectResource) + } + + if !statement.Actions.IsEmpty() { + if statement.Resources.Contains(bucketResource) && + statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") { + + if statement.Conditions != nil { + stringEqualsValue := statement.Conditions["StringEquals"] + values := set.NewStringSet() + if stringEqualsValue != nil { + values = stringEqualsValue["s3:prefix"] + if values == nil { + values = set.NewStringSet() + } + } + s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string { + return bucketResource + "/" + v + "*" + })) + } else if !s3PrefixValues.IsEmpty() { + readOnlyBucketStatements = append(readOnlyBucketStatements, statement) + continue + } + } + out = append(out, statement) + } + } + + skipBucketStatement := true + resourcePrefix := awsResourcePrefix + bucketName + "/" + for _, statement := range out { + if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() && + s3PrefixValues.Intersection(statement.Resources).IsEmpty() { + skipBucketStatement = false + break + } + } + + for _, statement := range readOnlyBucketStatements { + if skipBucketStatement && + statement.Resources.Contains(bucketResource) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") && + statement.Conditions == nil { + continue + } + + out = append(out, statement) + } + + if len(out) == 1 { + statement := out[0] + if statement.Resources.Contains(bucketResource) && + statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") && + statement.Conditions == nil { + out = []Statement{} + } + } + + return out +} + +// Appends given statement into statement list to have unique statements. +// - If statement already exists in statement list, it ignores. +// - If statement exists with different conditions, they are merged. +// - Else the statement is appended to statement list. 
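+//
+// For example, two statements that differ only in their Resource sets are
+// collapsed into a single statement whose Resource set is the union of both.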
+func appendStatement(statements []Statement, statement Statement) []Statement { + for i, s := range statements { + if s.Actions.Equals(statement.Actions) && + s.Effect == statement.Effect && + s.Principal.AWS.Equals(statement.Principal.AWS) && + reflect.DeepEqual(s.Conditions, statement.Conditions) { + statements[i].Resources = s.Resources.Union(statement.Resources) + return statements + } else if s.Resources.Equals(statement.Resources) && + s.Effect == statement.Effect && + s.Principal.AWS.Equals(statement.Principal.AWS) && + reflect.DeepEqual(s.Conditions, statement.Conditions) { + statements[i].Actions = s.Actions.Union(statement.Actions) + return statements + } + + if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) && + s.Actions.Intersection(statement.Actions).Equals(statement.Actions) && + s.Effect == statement.Effect && + s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) { + if reflect.DeepEqual(s.Conditions, statement.Conditions) { + return statements + } + if s.Conditions != nil && statement.Conditions != nil { + if s.Resources.Equals(statement.Resources) { + statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions) + return statements + } + } + } + } + + if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) { + return append(statements, statement) + } + + return statements +} + +// Appends two statement lists. +func appendStatements(statements []Statement, appendStatements []Statement) []Statement { + for _, s := range appendStatements { + statements = appendStatement(statements, s) + } + + return statements +} + +// Returns policy of given bucket statement. +func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) { + if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) { + return commonFound, readOnly, writeOnly + } + + if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && + statement.Conditions == nil { + commonFound = true + } + + if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) && + statement.Conditions == nil { + writeOnly = true + } + + if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { + if prefix != "" && statement.Conditions != nil { + if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok { + if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok { + if s3PrefixValues.Contains(prefix) { + readOnly = true + } + } + } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok { + if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok { + if !s3PrefixValues.Contains(prefix) { + readOnly = true + } + } + } + } else if prefix == "" && statement.Conditions == nil { + readOnly = true + } else if prefix != "" && statement.Conditions == nil { + readOnly = true + } + } + + return commonFound, readOnly, writeOnly +} + +// Returns policy of given object statement. 
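+// readOnly requires every action in readOnlyObjectActions and writeOnly
+// requires every action in writeOnlyObjectActions; a read-write statement
+// reports both.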
+func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
+ if statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnly = true
+ }
+ if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnly = true
+ }
+ }
+
+ return readOnly, writeOnly
+}
+
+// GetPolicy - Returns the policy for the given bucket name and prefix in the given statements.
+func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ bucketCommonFound := false
+ bucketReadOnly := false
+ bucketWriteOnly := false
+ matchedResource := ""
+ objReadOnly := false
+ objWriteOnly := false
+
+ for _, s := range statements {
+ matchedObjResources := set.NewStringSet()
+ if s.Resources.Contains(objectResource) {
+ matchedObjResources.Add(objectResource)
+ } else {
+ matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
+ }
+
+ if !matchedObjResources.IsEmpty() {
+ readOnly, writeOnly := getObjectPolicy(s)
+ for resource := range matchedObjResources {
+ if len(matchedResource) < len(resource) {
+ objReadOnly = readOnly
+ objWriteOnly = writeOnly
+ matchedResource = resource
+ } else if len(matchedResource) == len(resource) {
+ objReadOnly = objReadOnly || readOnly
+ objWriteOnly = objWriteOnly || writeOnly
+ matchedResource = resource
+ }
+ }
+ } else if s.Resources.Contains(bucketResource) {
+ commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
+ bucketCommonFound = bucketCommonFound || commonFound
+ bucketReadOnly = bucketReadOnly || readOnly
+ bucketWriteOnly = bucketWriteOnly || writeOnly
+ }
+ }
+
+ policy := BucketPolicyNone
+ if bucketCommonFound {
+ if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
+ policy = BucketPolicyReadWrite
+ } else if bucketReadOnly && objReadOnly {
+ policy = BucketPolicyReadOnly
+ } else if bucketWriteOnly && objWriteOnly {
+ policy = BucketPolicyWriteOnly
+ }
+ }
+
+ return policy
+}
+
+// GetPolicies - returns a map of policy rules for the given bucket name in the given statements.
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
+ policyRules := map[string]BucketPolicy{}
+ objResources := set.NewStringSet()
+ // Search all resources related to objects policy
+ for _, s := range statements {
+ for r := range s.Resources {
+ if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
+ objResources.Add(r)
+ }
+ }
+ }
+ // Treat each object-level resource as an actual object and fetch its policy
+ for r := range objResources {
+ // Strip any trailing wildcard and remember it in 'asterisk'.
+ asterisk := ""
+ if strings.HasSuffix(r, "*") {
+ r = r[:len(r)-1]
+ asterisk = "*"
+ }
+ objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)]
+ p := GetPolicy(statements, bucketName, objectPath)
+ policyRules[bucketName+"/"+objectPath+asterisk] = p
+ }
+ return policyRules
+}
+
+// SetPolicy - Returns new statements with the policy for the given bucket name and prefix appended.
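+//
+// For example (hypothetical bucket and prefix), granting anonymous read
+// access to objects under "downloads/":
+//
+// statements = SetPolicy(statements, BucketPolicyReadOnly, "mybucket", "downloads/")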
+func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
+ out := removeStatements(statements, bucketName, prefix)
+ ns := newStatements(policy, bucketName, prefix)
+ rv := appendStatements(out, ns)
+ return rv
+}
+
+// resourceMatch - returns whether the wild-card 'pattern' matches 'resource'.
+func resourceMatch(pattern, resource string) bool {
+ if pattern == "" {
+ return resource == pattern
+ }
+ if pattern == "*" {
+ return true
+ }
+ parts := strings.Split(pattern, "*")
+ if len(parts) == 1 {
+ return resource == pattern
+ }
+ tGlob := strings.HasSuffix(pattern, "*")
+ end := len(parts) - 1
+ if !strings.HasPrefix(resource, parts[0]) {
+ return false
+ }
+ for i := 1; i < end; i++ {
+ if !strings.Contains(resource, parts[i]) {
+ return false
+ }
+ idx := strings.Index(resource, parts[i]) + len(parts[i])
+ resource = resource[idx:]
+ }
+ return tGlob || strings.HasSuffix(resource, parts[end])
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
new file mode 100644
index 00000000000..e1ec6c02c5a
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -0,0 +1,324 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(u *url.URL) (path string) {
+ // Encode URL path.
+ if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
+ hostSplits := strings.SplitN(u.Host, ".", 4)
+ // First element is the bucket name.
+ bucketName := hostSplits[0]
+ path = "/" + bucketName
+ path += u.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
+ path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
+ path += u.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ path = s3utils.EncodePath(u.Path)
+ return
+}
+
+// PreSignV2 - presign the request in following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + d := time.Now().UTC() + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. + stringToSign := preStringifyHTTPReq(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(req.URL.Host, ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + + // Encode query and save. + req.URL.RawQuery = s3utils.QueryEncode(query) + + // Save signature finally. + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + + // Return. + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. +func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := stringifyHTTPReq(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringifyHTTPReq(req http.Request) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. 
+ isPreSign := true
+ writeCanonicalizedResource(buf, req, isPreSign)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func stringifyHTTPReq(req http.Request) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writeSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ isPreSign := false
+ writeCanonicalizedResource(buf, req, isPreSign)
+ return buf.String()
+}
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ if strings.Contains(v, "\n") {
+ // TODO: "Unfold" long headers that
+ // span multiple lines (as allowed by
+ // RFC 2616, section 4.2) by replacing
+ // the folding white-space (including
+ // new-line) by a single space.
+ buf.WriteString(v)
+ } else {
+ buf.WriteString(v)
+ }
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// The following list is already sorted and should always be, otherwise we could
+// have signature-related issues
+var resourceList = []string{
+ "acl",
+ "delete",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ path := encodeURL2Path(requestURL)
+ if isPreSign {
+ // Get encoded URL path.
+ if len(requestURL.Query()) > 0 {
+ // Keep the usual queries unescaped for string to sign.
+ query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
+ path = path + "?" + query
+ }
+ buf.WriteString(path)
+ return
+ }
+ buf.WriteString(path)
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+ // Verify if any sub resource queries are present, if yes
+ // canonicalize them.
+ for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // First element + switch n { + case 1: + buf.WriteByte('?') + // The rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // Request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1)) + } + } + } + } +} diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go new file mode 100644 index 00000000000..3322b67ccfb --- /dev/null +++ b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -0,0 +1,305 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - +/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes +/// problems with generating pre-signed URLs (that are executed +/// by other agents) or when customers pass requests through +/// proxies, which may modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed +/// URL should not provide a content-length constraint, +/// specifically when vending a S3 pre-signed PUT URL. The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. 
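+// The key is derived through chained HMAC-SHA256 operations, following the
+// AWS Signature Version 4 specification:
+//
+// dateKey = HMAC("AWS4"+secret, yyyymmdd)
+// regionKey = HMAC(dateKey, location)
+// serviceKey = HMAC(regionKey, "s3")
+// signingKey = HMAC(serviceKey, "aws4_request")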
+func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. +func getCanonicalHeaders(req http.Request) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + // Save all the headers in canonical form
: newline
+ // separated for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e. lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+// canonicalRequest =
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+func getCanonicalRequest(req http.Request) string {
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ s3utils.EncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req),
+ getSignedHeaders(req),
+ getHashedPayload(req),
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 a string based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t) + "\n"
+ stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4 sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return &req
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
new file mode 100644
index 00000000000..0619b308286
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -0,0 +1,39 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the sha256 sum of an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the hmac of 'data' keyed by 'key'.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/src/server/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100644
index 00000000000..a3b6ed845e4
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,183 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+ return false
+ }
+ // No need to regexp match, since the list is non-exhaustive.
+ // We let it be valid and fail later.
+ return true
+}
+
+// percentEncodeSlash - percent-encodes the '/' in the given string.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// QueryEncode - encodes query values into their URL-encoded form. In
+// addition to the percent encoding performed by EncodePath(), it also
+// percent-encodes '/' (forward slash).
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// if object matches reserved string, no need to encode them
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
+//
+// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the nature in which url.Encode() is written
+//
+// This function on the other hand is a direct replacement for url.Encode() technique to support
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/src/server/vendor/github.com/minio/minio-go/pkg/set/stringset.go
new file mode 100644
index 00000000000..9f33488e01a
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -0,0 +1,196 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes the string from the set. It does nothing if the string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// A 'matchFn' receives a set element as its first argument and 'matchString'
+// as its second argument, and should return true to include the element in
+// the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// An 'applyFn' receives a set element as its argument and returns the
+// processed string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether the given set is equal to the current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check each elements are equal.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
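+// The set is encoded as a sorted JSON string array; for example, a set
+// holding "b" and "a" marshals to ["a","b"].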
diff --git a/src/server/vendor/github.com/minio/minio-go/post-policy.go b/src/server/vendor/github.com/minio/minio-go/post-policy.go
new file mode 100644
index 00000000000..5e716124a84
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/post-policy.go
@@ -0,0 +1,209 @@
+package minio
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+//   policyCondition {
+//       matchType: "$eq",
+//       key: "$Content-Type",
+//       value: "image/png",
+//   }
+//
+type policyCondition struct {
+	matchType string
+	condition string
+	value     string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+	// Expiration date and time of the POST policy.
+	expiration time.Time
+	// Collection of different policy conditions.
+	conditions []policyCondition
+	// ContentLengthRange minimum and maximum allowable size for the
+	// uploaded content.
+	contentLengthRange struct {
+		min int64
+		max int64
+	}
+
+	// Post form data.
+	formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+	p := &PostPolicy{}
+	p.conditions = make([]policyCondition, 0)
+	p.formData = make(map[string]string)
+	return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+	if t.IsZero() {
+		return ErrInvalidArgument("No expiry time set.")
+	}
+	p.expiration = t
+	return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return ErrInvalidArgument("Object name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$key",
+		value:     key,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = key
+	return nil
+}
+
+// SetKeyStartsWith - Sets an object name that an policy based upload
+// can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+	if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
+		return ErrInvalidArgument("Object prefix is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$key",
+		value:     keyStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = keyStartsWith
+	return nil
+}
+
+// SetBucket - Sets bucket at which objects will be uploaded to.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+		return ErrInvalidArgument("Bucket name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$bucket",
+		value:     bucketName,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["bucket"] = bucketName
+	return nil
+}
+
+// SetContentType - Sets content-type of the object for this policy
+// based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+	if strings.TrimSpace(contentType) == "" || contentType == "" {
+		return ErrInvalidArgument("No content type specified.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$Content-Type",
+		value:     contentType,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Type"] = contentType
+	return nil
+}
+
+// SetContentLengthRange - Set new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+	if min > max {
+		return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
+	}
+	if min < 0 {
+		return ErrInvalidArgument("Minimum limit cannot be negative.")
+	}
+	if max < 0 {
+		return ErrInvalidArgument("Maximum limit cannot be negative.")
+	}
+	p.contentLengthRange.min = min
+	p.contentLengthRange.max = max
+	return nil
+}
+
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+	if strings.TrimSpace(status) == "" || status == "" {
+		return ErrInvalidArgument("Status is empty")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$success_action_status",
+		value:     status,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["success_action_status"] = status
+	return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+	if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+		return ErrInvalidArgument("Policy fields are empty.")
+	}
+	p.conditions = append(p.conditions, policyCond)
+	return nil
+}
+
+// Stringer interface for printing policy in json formatted string.
+func (p PostPolicy) String() string {
+	return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides Marshalled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+	var conditionsStr string
+	conditions := []string{}
+	for _, po := range p.conditions {
+		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+	}
+	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+			p.contentLengthRange.min, p.contentLengthRange.max))
+	}
+	if len(conditions) > 0 {
+		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+	}
+	retStr := "{"
+	retStr = retStr + expirationStr + ","
+	retStr = retStr + conditionsStr
+	retStr = retStr + "}"
+	return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshalled json.
+func (p PostPolicy) base64() string {
+	return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
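Pachyderm itself never builds POST policies, but PostPolicy is part of the vendored public surface. A hedged usage sketch (the bucket and key here are hypothetical, chosen only to show the setter/validation flow):

package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	// Each setter validates its input and records a matching condition.
	if err := policy.SetBucket("pachyderm-blocks"); err != nil { // hypothetical bucket
		panic(err)
	}
	if err := policy.SetKey("block/0123abcd"); err != nil { // hypothetical key
		panic(err)
	}
	if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
		panic(err)
	}
	// Restrict uploads to between 1 byte and 8 MB.
	if err := policy.SetContentLengthRange(1, 8*1024*1024); err != nil {
		panic(err)
	}
	fmt.Println(policy) // prints the assembled JSON policy document
}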
diff --git a/src/server/vendor/github.com/minio/minio-go/retry-continous.go b/src/server/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100644
index 00000000000..e300af69c5a
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,52 @@
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// normalize jitter to the range [0, 1.0]
+	if jitter < NoJitter {
+		jitter = NoJitter
+	}
+	if jitter > MaxJitter {
+		jitter = MaxJitter
+	}
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// 1<<uint(attempt) below could overflow, so limit the value of attempt
+		maxAttempt := 30
+		if attempt > maxAttempt {
+			attempt = maxAttempt
+		}
+		//sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempts starts.
+			case attemptCh <- nextBackoff:
+				nextBackoff++
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
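newRetryTimerContinous is unexported, so it cannot be called from outside the package; the backoff arithmetic it uses is easier to inspect in isolation. A standalone sketch of the same computation (NoJitter and MaxJitter are the constants defined in retry.go below):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors exponentialBackoffWait above: exponential growth from
// 'unit', clamped at 'cap', minus a random jitter fraction of the result.
func backoff(attempt int, unit, cap time.Duration, jitter float64, rnd *rand.Rand) time.Duration {
	if attempt > 30 { // keep 1<<uint(attempt) from overflowing
		attempt = 30
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	return sleep - time.Duration(rnd.Float64()*float64(sleep)*jitter)
}

func main() {
	rnd := rand.New(rand.NewSource(42))
	for attempt := 0; attempt < 6; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt,
			backoff(attempt, time.Second, 30*time.Second, 0.5, rnd))
	}
}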
diff --git a/src/server/vendor/github.com/minio/minio-go/retry.go b/src/server/vendor/github.com/minio/minio-go/retry.go
new file mode 100644
index 00000000000..41b70e4748f
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/retry.go
@@ -0,0 +1,138 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 5
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// newRetryTimer creates a timer with exponentially increasing delays
+// until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		//sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		for i := 0; i < maxRetry; i++ {
+			select {
+			// Attempts start from 1.
+			case attemptCh <- i + 1:
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(i))
+		}
+	}()
+	return attemptCh
+}
+
+// isNetErrorRetryable - is network error retryable.
+func isNetErrorRetryable(err error) bool {
+	switch err.(type) {
+	case net.Error:
+		switch err.(type) {
+		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+			return true
+		case *url.Error:
+			// For a URL error, where it replies back "connection closed"
+			// retry again.
+			if strings.Contains(err.Error(), "Connection closed by foreign host") {
+				return true
+			}
+		default:
+			if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+				// If error is - tlsHandshakeTimeoutError, retry.
+				return true
+			} else if strings.Contains(err.Error(), "i/o timeout") {
+				// If error is - tcp timeoutError, retry.
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+	"RequestError":          {},
+	"RequestTimeout":        {},
+	"Throttling":            {},
+	"ThrottlingException":   {},
+	"RequestLimitExceeded":  {},
+	"RequestThrottled":      {},
+	"InternalError":         {},
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	// Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+	_, ok = retryableS3Codes[s3Code]
+	return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	http.StatusInternalServerError: {},
+	http.StatusBadGateway:          {},
+	http.StatusServiceUnavailable:  {},
+	// Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+	_, ok = retryableHTTPStatusCodes[httpStatusCode]
+	return ok
+}
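The retry predicates are all unexported; MaxRetry is the one public knob (a var, so callers can raise it once at startup). The HTTP classification reduces to a set lookup, restated here as a self-contained sketch:

package main

import (
	"fmt"
	"net/http"
)

// retryable mirrors retryableHTTPStatusCodes above.
var retryable = map[int]struct{}{
	429: {}, // StatusTooManyRequests, spelled out for older Go
	http.StatusInternalServerError: {},
	http.StatusBadGateway:          {},
	http.StatusServiceUnavailable:  {},
}

func main() {
	for _, code := range []int{200, 404, 429, 500, 502, 503} {
		_, ok := retryable[code]
		fmt.Printf("%d retryable: %v\n", code, ok)
	}
}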
diff --git a/src/server/vendor/github.com/minio/minio-go/s3-endpoints.go b/src/server/vendor/github.com/minio/minio-go/s3-endpoints.go
new file mode 100644
index 00000000000..d7fa5e03807
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -0,0 +1,47 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// awsS3EndpointMap Amazon S3 endpoint map.
+// "cn-north-1" adds support for AWS China.
+var awsS3EndpointMap = map[string]string{
+	"us-east-1":      "s3.amazonaws.com",
+	"us-east-2":      "s3-us-east-2.amazonaws.com",
+	"us-west-2":      "s3-us-west-2.amazonaws.com",
+	"us-west-1":      "s3-us-west-1.amazonaws.com",
+	"ca-central-1":   "s3.ca-central-1.amazonaws.com",
+	"eu-west-1":      "s3-eu-west-1.amazonaws.com",
+	"eu-west-2":      "s3-eu-west-2.amazonaws.com",
+	"eu-central-1":   "s3-eu-central-1.amazonaws.com",
+	"ap-south-1":     "s3-ap-south-1.amazonaws.com",
+	"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
+	"ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
+	"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
+	"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
+	"sa-east-1":      "s3-sa-east-1.amazonaws.com",
+	"cn-north-1":     "s3.cn-north-1.amazonaws.com.cn",
+}
+
+// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
+func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
+	s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
+	if !ok {
+		// Default to 's3.amazonaws.com' endpoint.
+		s3Endpoint = "s3.amazonaws.com"
+	}
+	return s3Endpoint
+}
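getS3Endpoint is a plain table lookup with a default. The pattern, restated as a self-contained sketch (the two entries are copied from the map above; the unknown region is invented):

package main

import "fmt"

// endpointFor mirrors getS3Endpoint above: known regions map to their
// regional host, and unknown locations fall back to the us-east-1 endpoint.
func endpointFor(bucketLocation string) string {
	endpoints := map[string]string{
		"eu-central-1": "s3-eu-central-1.amazonaws.com",
		"cn-north-1":   "s3.cn-north-1.amazonaws.com.cn",
	}
	if ep, ok := endpoints[bucketLocation]; ok {
		return ep
	}
	return "s3.amazonaws.com"
}

func main() {
	fmt.Println(endpointFor("cn-north-1")) // s3.cn-north-1.amazonaws.com.cn
	fmt.Println(endpointFor("nowhere-1"))  // s3.amazonaws.com
}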
+func (s SignatureType) isV4() bool {
+	return s == SignatureV4 || s == Latest
+}
diff --git a/src/server/vendor/github.com/minio/minio-go/tempfile.go b/src/server/vendor/github.com/minio/minio-go/tempfile.go
new file mode 100644
index 00000000000..65c7b0da167
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/tempfile.go
@@ -0,0 +1,60 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+)
+
+// tempFile - temporary file container.
+type tempFile struct {
+	*os.File
+	mutex *sync.Mutex
+}
+
+// newTempFile returns a new temporary file, once closed it automatically deletes itself.
+func newTempFile(prefix string) (*tempFile, error) {
+	// use platform specific temp directory.
+	file, err := ioutil.TempFile(os.TempDir(), prefix)
+	if err != nil {
+		return nil, err
+	}
+	return &tempFile{
+		File:  file,
+		mutex: &sync.Mutex{},
+	}, nil
+}
+
+// Close - closer wrapper to close and remove temporary file.
+func (t *tempFile) Close() error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	if t.File != nil {
+		// Close the file.
+		if err := t.File.Close(); err != nil {
+			return err
+		}
+		// Remove file.
+		if err := os.Remove(t.File.Name()); err != nil {
+			return err
+		}
+		t.File = nil
+	}
+	return nil
+}
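tempFile is also unexported; the idea is simply "a temp file that unlinks itself on Close", which minio-go uses to stage upload data. The equivalent pattern with only the standard library (the prefix here is an arbitrary stand-in):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// Create in the platform temp dir, write staged data, then make sure
	// the file is gone once we are done with it - the tempFile contract.
	f, err := ioutil.TempFile(os.TempDir(), "minio-staging-")
	if err != nil {
		panic(err)
	}
	name := f.Name()
	if _, err := f.Write([]byte("staged part")); err != nil {
		panic(err)
	}
	f.Close()
	os.Remove(name)

	_, statErr := os.Stat(name)
	fmt.Println("cleaned up:", os.IsNotExist(statErr)) // cleaned up: true
}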
diff --git a/src/server/vendor/github.com/minio/minio-go/utils.go b/src/server/vendor/github.com/minio/minio-go/utils.go
new file mode 100644
index 00000000000..93cd1712f93
--- /dev/null
+++ b/src/server/vendor/github.com/minio/minio-go/utils.go
@@ -0,0 +1,227 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/xml"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// xmlDecoder provide decoded value in xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// sumMD5 calculate sumMD5 sum for an input byte array.
+func sumMD5(data []byte) []byte {
+	hash := md5.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+	if strings.Contains(endpoint, ":") {
+		host, _, err := net.SplitHostPort(endpoint)
+		if err != nil {
+			return nil, err
+		}
+		if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	} else {
+		if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	}
+	// If secure is false, use 'http' scheme.
+	scheme := "https"
+	if !secure {
+		scheme = "http"
+	}
+
+	// Construct a secured endpoint URL.
+	endpointURLStr := scheme + "://" + endpoint
+	endpointURL, err := url.Parse(endpointURLStr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate incoming endpoint URL.
+	if err := isValidEndpointURL(*endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// closeResponse close non nil response with any response Body.
+// convenient wrapper to drain any remaining data on response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+	// Callers should close resp.Body when done reading from it.
+	// If resp.Body is not closed, the Client's underlying RoundTripper
+	// (typically Transport) may not be able to re-use a persistent TCP
+	// connection to the server for a subsequent "keep-alive" request.
+	if resp != nil && resp.Body != nil {
+		// Drain any remaining Body and then close the connection.
+		// Without this closing connection would disallow re-using
+		// the same connection for future uses.
+		//  - http://stackoverflow.com/a/17961593/4465767
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}
+}
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+	if endpointURL == sentinelURL {
+		return ErrInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+		if !s3utils.IsAmazonEndpoint(endpointURL) {
+			return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+		}
+	}
+	if strings.Contains(endpointURL.Host, ".googleapis.com") {
+		if !s3utils.IsGoogleEndpoint(endpointURL) {
+			return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+		}
+	}
+	return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+// We support '.' with bucket names but we fallback to using path
+// style requests instead for such buckets.
+var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+
+// Invalid bucket name with double dot.
+var invalidDotBucketName = regexp.MustCompile(`\.\.`)
+
+// isValidBucketName - verify bucket name in accordance with
+//  - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func isValidBucketName(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" {
+		return ErrInvalidBucketName("Bucket name cannot be empty.")
+	}
+	if len(bucketName) < 3 {
+		return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
+	}
+	if len(bucketName) > 63 {
+		return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
+	}
+	if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
+		return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
+	}
+	if invalidDotBucketName.MatchString(bucketName) {
+		return ErrInvalidBucketName("Bucket name cannot have successive periods.")
+	}
+	if !validBucketName.MatchString(bucketName) {
+		return ErrInvalidBucketName("Bucket name contains invalid characters.")
+	}
+	return nil
+}
+
+// isValidObjectName - verify object name in accordance with
+//  - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func isValidObjectName(objectName string) error {
+	if strings.TrimSpace(objectName) == "" {
+		return ErrInvalidObjectName("Object name cannot be empty.")
+	}
+	if len(objectName) > 1024 {
+		return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectName) {
+		return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// isValidObjectPrefix - verify if object prefix is valid.
+func isValidObjectPrefix(objectPrefix string) error {
+	if len(objectPrefix) > 1024 {
+		return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectPrefix) {
+		return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+	filteredHeader = cloneHeader(header)
+	for _, key := range filterKeys {
+		filteredHeader.Del(key)
+	}
+	return filteredHeader
+}
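cloneHeader and filterHeader are the pure helpers here: deep-copy the header map, then delete the unwanted keys, so the caller's original header is never mutated. Since both are unexported, the pattern is restated standalone (the header values are made up):

package main

import (
	"fmt"
	"net/http"
)

// filtered mirrors filterHeader above: deep-copy the header, then drop keys.
func filtered(h http.Header, drop []string) http.Header {
	out := make(http.Header, len(h))
	for k, vv := range h {
		vv2 := make([]string, len(vv))
		copy(vv2, vv)
		out[k] = vv2
	}
	for _, k := range drop {
		out.Del(k)
	}
	return out
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "application/octet-stream")
	h.Set("X-Amz-Id-2", "request-noise")
	fmt.Println(filtered(h, []string{"X-Amz-Id-2"})) // only Content-Type remains
}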
diff --git a/src/server/vendor/vendor.json b/src/server/vendor/vendor.json
index e3ef7609517..79cae574eaf 100644
--- a/src/server/vendor/vendor.json
+++ b/src/server/vendor/vendor.json
@@ -687,6 +687,36 @@
 			"revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
 			"revisionTime": "2016-04-24T11:30:07Z"
 		},
+		{
+			"checksumSHA1": "2FrFoR0UtufaCGRxM4UxP1fR3LM=",
+			"path": "github.com/minio/minio-go",
+			"revision": "2d9b5aa27693f8489b04aed103d2ef53fa5ddb4f",
+			"revisionTime": "2017-01-07T19:26:19Z"
+		},
+		{
+			"checksumSHA1": "neH34/65OXeKHM/MlV8MbhcdFBc=",
+			"path": "github.com/minio/minio-go/pkg/policy",
+			"revision": "2d9b5aa27693f8489b04aed103d2ef53fa5ddb4f",
+			"revisionTime": "2017-01-07T19:26:19Z"
+		},
+		{
+			"checksumSHA1": "m/6/na9lVtamkfmIdIOi5pdccgw=",
+			"path": "github.com/minio/minio-go/pkg/s3signer",
+			"revision": "2d9b5aa27693f8489b04aed103d2ef53fa5ddb4f",
+			"revisionTime": "2017-01-07T19:26:19Z"
+		},
+		{
+			"checksumSHA1": "gRnCFKb4x83GBLVUZXoOjujd+U0=",
+			"path": "github.com/minio/minio-go/pkg/s3utils",
+			"revision": "2d9b5aa27693f8489b04aed103d2ef53fa5ddb4f",
+			"revisionTime": "2017-01-07T19:26:19Z"
+		},
+		{
+			"checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=",
+			"path": "github.com/minio/minio-go/pkg/set",
+			"revision": "2d9b5aa27693f8489b04aed103d2ef53fa5ddb4f",
+			"revisionTime": "2017-01-07T19:26:19Z"
+		},
 		{
 			"checksumSHA1": "XktPYWgkJB3lGwGHImfqI/JWP4M=",
 			"path": "github.com/opencontainers/runc/libcontainer/cgroups",