diff --git a/Gopkg.lock b/Gopkg.lock index 8a112c9de..8c3d20cfa 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -17,33 +17,23 @@ version = "v0.19.0" [[projects]] - digest = "1:08d820e7e9ef4320e9315e03a78fc952cc84865e9af9fdf6cd8182eac9ec2bc0" - name = "github.com/Azure/azure-sdk-for-go" - packages = [ - "storage", - "version", - ] + digest = "1:d2ccb697dc13c8fbffafa37baae97594d5592ae8f7e113471084137315536e2b" + name = "github.com/Azure/azure-pipeline-go" + packages = ["pipeline"] pruneopts = "NUT" - revision = "56a0b1d2af3b65d5f1f7a330e02faaf48b473c5a" - version = "v14.6.0" + revision = "b8e3409182fd52e74f7d7bdfbff5833591b3b655" + version = "v0.1.8" [[projects]] - digest = "1:9fe4851c1eb1ab8c7486fee4e2d06db0e6509d6772211177e631c1abfb41b720" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date", - "logger", - "version", - ] + digest = "1:c4a5edf3b0f38e709a78dcc945997678a364c2b5adfd48842a3dd349c352f833" + name = "github.com/Azure/azure-storage-blob-go" + packages = ["azblob"] pruneopts = "NUT" - revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6" - version = "v10.15.5" + revision = "5152f14ace1c6db66bd9cb57840703a8358fa7bc" + version = "0.3.0" [[projects]] - digest = "1:089e054a48895ade9bdfe1b31b2aaa569c41600cc4dfbd3cd2566e63ff2c4606" + digest = "1:66e9bc21369358ecfe3e1226e1c7040dafb2571b4f857dd9ce2cefb3d21c4dbe" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -82,8 +72,8 @@ "service/sts", ] pruneopts = "NUT" - revision = "333869724b050875e82f4e593488ad2464c41331" - version = "v1.15.83" + revision = "180cc10e5ff368b86dee226b034af7d1672baec6" + version = "v1.15.87" [[projects]] branch = "master" @@ -362,14 +352,6 @@ revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" version = "v1.0.1" -[[projects]] - digest = "1:13ada91f079028d1b4ca88e10a16439dcfa6541d26ed2e61e770f56d06301933" - name = "github.com/marstr/guid" - packages = ["."] - pruneopts = "NUT" - revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f" - version = "v1.1.0" - [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -379,11 +361,18 @@ version = "v1.0.1" [[projects]] - digest = "1:cdc5cfc04dd0b98f86433207fe6d9879757c46734441a08886acc11251c7ed4a" + digest = "1:d572b3bb05f19ba06398e950dadeb4e2f9fe2ab08be55440d961b0625f49ec31" name = "github.com/onsi/ginkgo" packages = [ ".", "config", + "extensions/table", + "ginkgo/convert", + "ginkgo/interrupthandler", + "ginkgo/nodot", + "ginkgo/testrunner", + "ginkgo/testsuite", + "ginkgo/watch", "internal/codelocation", "internal/containernode", "internal/failer", @@ -406,11 +395,14 @@ version = "v1.7.0" [[projects]] - digest = "1:0db10c512b410a1ecd8845e31db52622c08b76198f7ab76afb25319c84a7fd4b" + digest = "1:c1f231e749cfbfb59870c4399faf664dd2fb8d7350569149eddaf526450b51c9" name = "github.com/onsi/gomega" packages = [ ".", "format", + "gbytes", + "gexec", + "ghttp", "internal/assertion", "internal/asyncassertion", "internal/oraclematcher", @@ -477,7 +469,7 @@ "xfs", ] pruneopts = "NUT" - revision = "619930b0b4713cc1280189bf0a4c54b3fb506f60" + revision = "aa55a523dc0a8297edf51bb75e8eec13eb3be45d" [[projects]] digest = "1:9dfe20a82800ffcf0a2bde15beba44cc6d6f6cf1d003420ecc8ad0ac78300d70" @@ -486,14 +478,6 @@ pruneopts = "NUT" revision = "2315d5715e36303a941d907f038da7f7c44c773b" -[[projects]] - digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca" - name = "github.com/satori/go.uuid" - packages = 
["."] - pruneopts = "NUT" - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" - [[projects]] digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406" name = "github.com/sirupsen/logrus" @@ -614,7 +598,7 @@ "ssh/terminal", ] pruneopts = "NUT" - revision = "e657309f52e71501f9934566ac06dc5c2f7f11a1" + revision = "eb0de9b17e854e9b1ccd9963efafc79862359959" [[projects]] branch = "master" @@ -634,11 +618,11 @@ "trace", ] pruneopts = "NUT" - revision = "adae6a3d119ae4890b46832a2e88a95adc62b8e7" + revision = "fae4c4e3ad76c295c3d6d259f898136b4bf833a8" [[projects]] branch = "master" - digest = "1:ae6187c169fc5ec0d8d46483be8ce73e137a6ebb2dac5e1b1280ea04a95e1c2c" + digest = "1:169c27544e7f54a861a05cd84078b494eaf3e41543f0dfd4dd215fa32139cd40" name = "golang.org/x/oauth2" packages = [ ".", @@ -648,18 +632,18 @@ "jwt", ] pruneopts = "NUT" - revision = "8f65e3013ebad444f13bc19536f7865efc793816" + revision = "28207608b83849a028d4f12e46533a6b6894ecaf" [[projects]] branch = "master" - digest = "1:710483a12b766fb0e99ab26cbf6f82df2d958e94cb28890fcb2db96f76d39762" + digest = "1:6ddfd101211f81df3ba1f474baf1c451f7708f01c1e0c4be49cd9f0af03596cf" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "NUT" - revision = "62eef0e2fa9b2c385f7b2778e763486da6880d37" + revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31" [[projects]] digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea" @@ -721,7 +705,7 @@ "transport/http/internal/propagation", ] pruneopts = "NUT" - revision = "bed42c95df7d5f673175b43d85f6b4047efe3814" + revision = "0a71a4356c3f4bcbdd16294c78ca2a31fda36cca" [[projects]] digest = "1:b63b351b57e64ae8aec1af030dc5e74a2676fcda62102f006ae4411fac1b04c8" @@ -752,7 +736,7 @@ "googleapis/rpc/status", ] pruneopts = "NUT" - revision = "b5d43981345bdb2c233eb4bf3277847b48c6fdc6" + revision = "31ac5d88444a9e7ad18077db9a165d793ad06a2e" [[projects]] digest = "1:f112c6f3e9bbd16eb113db8f37adb6bd10270142079d00c7ac625534cbb16475" @@ -800,6 +784,14 @@ source = "https://github.com/fsnotify/fsnotify.git" version = "v1.4.7" +[[projects]] + digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" + name = "gopkg.in/fsnotify/fsnotify.v1" + packages = ["."] + pruneopts = "NUT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + [[projects]] branch = "v1" digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" @@ -809,19 +801,20 @@ revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] - digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082" + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "NUT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ "cloud.google.com/go/storage", - "github.com/Azure/azure-sdk-for-go/storage", + "github.com/Azure/azure-pipeline-go/pipeline", + "github.com/Azure/azure-storage-blob-go/azblob", "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/aws/request", "github.com/aws/aws-sdk-go/aws/session", @@ -853,12 +846,47 @@ "github.com/gophercloud/gophercloud/pagination", "github.com/gophercloud/gophercloud/testhelper", "github.com/gophercloud/gophercloud/testhelper/client", + "github.com/hpcloud/tail", + "github.com/hpcloud/tail/ratelimiter", + 
"github.com/hpcloud/tail/util", + "github.com/hpcloud/tail/watch", + "github.com/hpcloud/tail/winfile", "github.com/onsi/ginkgo", + "github.com/onsi/ginkgo/config", + "github.com/onsi/ginkgo/extensions/table", + "github.com/onsi/ginkgo/ginkgo/convert", + "github.com/onsi/ginkgo/ginkgo/interrupthandler", + "github.com/onsi/ginkgo/ginkgo/nodot", + "github.com/onsi/ginkgo/ginkgo/testrunner", + "github.com/onsi/ginkgo/ginkgo/testsuite", + "github.com/onsi/ginkgo/ginkgo/watch", + "github.com/onsi/ginkgo/internal/codelocation", + "github.com/onsi/ginkgo/internal/containernode", + "github.com/onsi/ginkgo/internal/failer", + "github.com/onsi/ginkgo/internal/leafnodes", + "github.com/onsi/ginkgo/internal/remote", + "github.com/onsi/ginkgo/internal/spec", + "github.com/onsi/ginkgo/internal/spec_iterator", + "github.com/onsi/ginkgo/internal/specrunner", + "github.com/onsi/ginkgo/internal/suite", + "github.com/onsi/ginkgo/internal/testingtproxy", + "github.com/onsi/ginkgo/internal/writer", + "github.com/onsi/ginkgo/reporters", + "github.com/onsi/ginkgo/reporters/stenographer", + "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable", + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty", + "github.com/onsi/ginkgo/types", "github.com/onsi/gomega", + "github.com/onsi/gomega/gbytes", + "github.com/onsi/gomega/gexec", + "github.com/onsi/gomega/ghttp", "github.com/robfig/cron", "github.com/sirupsen/logrus", "github.com/spf13/cobra", + "golang.org/x/sys/unix", "google.golang.org/api/iterator", + "gopkg.in/fsnotify/fsnotify.v1", + "gopkg.in/tomb.v1", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index e708a68b9..d77c6919e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -59,8 +59,12 @@ required = ["github.com/coreos/bbolt"] version = "0.0.1" [[constraint]] - name = "github.com/Azure/azure-sdk-for-go" - version = "14.6.0" + name = "github.com/Azure/azure-storage-blob-go" + version = "0.3.0" + +[[constraint]] + name = "github.com/Azure/azure-pipeline-go" + version = "v0.1.8" [[constraint]] name = "cloud.google.com/go" diff --git a/LICENSE.md b/LICENSE.md index 16aea55aa..96dcee432 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -256,9 +256,14 @@ Copyright (c) 2013-2014 Onsi Fakhouri. MIT License (https://github.com/onsi/gomega/blob/master/LICENSE) Microsoft Azure SDK for Go. -https://github.com/Azure/azure-sdk-for-go -Copyright 2014-2017 Microsoft -Apache 2 license (https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE) +https://github.com/Azure/azure-storage-blob-go +Copyright 2017 Microsoft +MIT License (https://github.com/Azure/azure-storage-blob-go/blob/master/LICENSE) + +Microsoft Azure SDK for Go. 
+https://github.com/Azure/azure-pipeline-go +Copyright 2017 Microsoft +MIT License (https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) Google Cloud Client Libraries for Go https://github.com/GoogleCloudPlatform/google-cloud-go diff --git a/pkg/server/httpAPI.go b/pkg/server/httpAPI.go index 43da0d7c4..69dbcfc76 100644 --- a/pkg/server/httpAPI.go +++ b/pkg/server/httpAPI.go @@ -144,10 +144,7 @@ func (h *HTTPHandler) serveInitialize(rw http.ResponseWriter, req *http.Request) defer h.initializationStatusMutex.Unlock() if err != nil { h.Logger.Errorf("Failed initialization: %v", err) - rw.WriteHeader(http.StatusInternalServerError) h.initializationStatus = initializationStatusFailed - // Optional: Event if we send start signal it wi - // h.ReqCh <- HandlerSsrStart return } h.Logger.Infof("Successfully initialized data directory \"%s\" for etcd.", h.EtcdInitializer.Validator.Config.DataDir) diff --git a/pkg/snapstore/abs_snapstore.go b/pkg/snapstore/abs_snapstore.go index 19d182956..e808055d4 100644 --- a/pkg/snapstore/abs_snapstore.go +++ b/pkg/snapstore/abs_snapstore.go @@ -15,16 +15,18 @@ package snapstore import ( + "context" "encoding/base64" "fmt" "io" "io/ioutil" + "net/url" "os" "path" "sort" "sync" - "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/azure-storage-blob-go/azblob" "github.com/sirupsen/logrus" ) @@ -35,8 +37,8 @@ const ( // ABSSnapStore is an ABS backed snapstore. type ABSSnapStore struct { - prefix string - container *storage.Container + containerURL *azblob.ContainerURL + prefix string // maxParallelChunkUploads hold the maximum number of parallel chunk uploads allowed. maxParallelChunkUploads int tempDir string @@ -48,38 +50,45 @@ func NewABSSnapStore(container, prefix, tempDir string, maxParallelChunkUploads if err != nil { return nil, err } - storageKey, err := GetEnvVarOrError(absStorageKey) if err != nil { return nil, err } - client, err := storage.NewBasicClient(storageAccount, storageKey) + credentials, err := azblob.NewSharedKeyCredential(storageAccount, storageKey) if err != nil { - return nil, fmt.Errorf("create ABS client failed: %v", err) + return nil, fmt.Errorf("failed to create shared key credentials: %v", err) } - return GetSnapstoreFromClient(container, prefix, tempDir, maxParallelChunkUploads, &client) + p := azblob.NewPipeline(credentials, azblob.PipelineOptions{ + Retry: azblob.RetryOptions{ + TryTimeout: downloadTimeout, + }}) + u, err := url.Parse(fmt.Sprintf("https://%s.%s", storageAccount, AzureBlobStorageHostName)) + if err != nil { + return nil, fmt.Errorf("failed to parse service URL: %v", err) + } + serviceURL := azblob.NewServiceURL(*u, p) + containerURL := serviceURL.NewContainerURL(container) + return GetABSSnapstoreFromClient(container, prefix, tempDir, maxParallelChunkUploads, &containerURL) } -// GetSnapstoreFromClient returns a new ABS object for a given container using the supplied storageClient -func GetSnapstoreFromClient(container, prefix, tempDir string, maxParallelChunkUploads int, storageClient *storage.Client) (*ABSSnapStore, error) { - client := storageClient.GetBlobService() - +// GetABSSnapstoreFromClient returns a new ABS object for a given container using the supplied container URL +func GetABSSnapstoreFromClient(container, prefix, tempDir string, maxParallelChunkUploads int, containerURL *azblob.ContainerURL) (*ABSSnapStore, error) { // Check if supplied container exists - containerRef := client.GetContainerReference(container) - containerExists, err := containerRef.Exists() + ctx, cancel := 
context.WithTimeout(context.TODO(), providerConnectionTimeout) + defer cancel() + _, err := containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{}) if err != nil { - return nil, err - } - - if !containerExists { - return nil, fmt.Errorf("container %v does not exist", container) + aer, ok := err.(azblob.StorageError) + if !ok || aer.ServiceCode() != azblob.ServiceCodeContainerNotFound { + return nil, fmt.Errorf("failed to get properties of container %v with error: %v", container, err) + } + return nil, fmt.Errorf("container %s does not exist", container) } - return &ABSSnapStore{ prefix: prefix, - container: containerRef, + containerURL: containerURL, maxParallelChunkUploads: maxParallelChunkUploads, tempDir: tempDir, }, nil @@ -88,27 +97,35 @@ func GetSnapstoreFromClient(container, prefix, tempDir string, maxParallelChunkU // Fetch should open reader for the snapshot file from store func (a *ABSSnapStore) Fetch(snap Snapshot) (io.ReadCloser, error) { blobName := path.Join(a.prefix, snap.SnapDir, snap.SnapName) - blob := a.container.GetBlobReference(blobName) - opts := &storage.GetBlobOptions{} - return blob.Get(opts) + blob := a.containerURL.NewBlobURL(blobName) + resp, err := blob.Download(context.Background(), io.SeekStart, azblob.CountToEnd, azblob.BlobAccessConditions{}, false) + if err != nil { + return nil, fmt.Errorf("failed to download the blob %s with error: %v", blobName, err) + } + return resp.Body(azblob.RetryReaderOptions{}), nil } // List will list all snapshot files on store func (a *ABSSnapStore) List() (SnapList, error) { - params := storage.ListBlobsParameters{Prefix: path.Join(a.prefix) + "/"} - resp, err := a.container.ListBlobs(params) - if err != nil { - return nil, err - } - var snapList SnapList - for _, blob := range resp.Blobs { - k := (blob.Name)[len(resp.Prefix):] - s, err := ParseSnapshot(k) + opts := azblob.ListBlobsSegmentOptions{Prefix: path.Join(a.prefix, "/")} + for marker := (azblob.Marker{}); marker.NotDone(); { + // Get a result segment starting with the blob indicated by the current Marker. + listBlob, err := a.containerURL.ListBlobsFlatSegment(context.TODO(), marker, opts) if err != nil { - logrus.Warnf("Invalid snapshot found. Ignoring it:%s\n", k) - } else { - snapList = append(snapList, s) + return nil, fmt.Errorf("failed to list the blobs, error: %v", err) + } + marker = listBlob.NextMarker + + // Process the blobs returned in this result segment + for _, blob := range listBlob.Segment.BlobItems { + k := blob.Name[len(a.prefix)+1:] + s, err := ParseSnapshot(k) + if err != nil { + logrus.Warnf("Invalid snapshot found. Ignoring it:%s\n", k) + } else { + snapList = append(snapList, s) + } } } sort.Sort(snapList) @@ -170,16 +187,15 @@ func (a *ABSSnapStore) Save(snap Snapshot, r io.Reader) error { } logrus.Info("All chunk uploaded successfully. 
Uploading blocklist.") blobName := path.Join(a.prefix, snap.SnapDir, snap.SnapName) - blob := a.container.GetBlobReference(blobName) - var blockList []storage.Block + blob := a.containerURL.NewBlockBlobURL(blobName) + var blockList []string for partNumber := int64(1); partNumber <= noOfChunks; partNumber++ { - block := storage.Block{ - ID: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%010d", partNumber))), - Status: storage.BlockStatusUncommitted, - } - blockList = append(blockList, block) + blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%010d", partNumber))) + blockList = append(blockList, blockID) } - if err := blob.PutBlockList(blockList, &storage.PutBlockListOptions{}); err != nil { + ctx, cancel := context.WithTimeout(context.TODO(), chunkUploadTimeout) + defer cancel() + if _, err := blob.CommitBlockList(ctx, blockList, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("failed uploading blocklist for snapshot with error: %v", err) } logrus.Info("Blocklist uploaded successfully.") @@ -197,13 +213,18 @@ func (a *ABSSnapStore) uploadBlock(snap *Snapshot, file *os.File, offset, chunkS size = chunkSize } - sr := io.NewSectionReader(file, offset, chunkSize) + sr := io.NewSectionReader(file, offset, size) blobName := path.Join(a.prefix, snap.SnapDir, snap.SnapName) - blob := a.container.GetBlobReference(blobName) + blob := a.containerURL.NewBlockBlobURL(blobName) partNumber := ((offset / chunkSize) + 1) blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%010d", partNumber))) - err = blob.PutBlockWithLength(blockID, uint64(size), sr, &storage.PutBlockOptions{}) - return err + ctx, cancel := context.WithTimeout(context.TODO(), chunkUploadTimeout) + defer cancel() + var transactionMD5 []byte + if _, err := blob.StageBlock(ctx, blockID, sr, azblob.LeaseAccessConditions{}, transactionMD5); err != nil { + return fmt.Errorf("failed to upload chunk offset: %d, blob: %s, error: %v", offset, blobName, err) + } + return nil } func (a *ABSSnapStore) blockUploader(wg *sync.WaitGroup, stopCh <-chan struct{}, snap *Snapshot, file *os.File, chunkUploadCh chan chunk, errCh chan<- chunkUploadResult) { @@ -229,8 +250,9 @@ func (a *ABSSnapStore) blockUploader(wg *sync.WaitGroup, stopCh <-chan struct{}, // Delete should delete the snapshot file from store func (a *ABSSnapStore) Delete(snap Snapshot) error { blobName := path.Join(a.prefix, snap.SnapDir, snap.SnapName) - blob := a.container.GetBlobReference(blobName) - - opts := &storage.DeleteBlobOptions{} - return blob.Delete(opts) + blob := a.containerURL.NewBlobURL(blobName) + if _, err := blob.Delete(context.TODO(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { + return fmt.Errorf("failed to delete blob %s with error: %v", blobName, err) + } + return nil } diff --git a/pkg/snapstore/abs_snapstore_test.go b/pkg/snapstore/abs_snapstore_test.go new file mode 100644 index 000000000..52f725e16 --- /dev/null +++ b/pkg/snapstore/abs_snapstore_test.go @@ -0,0 +1,301 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snapstore_test + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "sync" + + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-storage-blob-go/azblob" + . "github.com/onsi/gomega" + + . "github.com/gardener/etcd-backup-restore/pkg/snapstore" +) + +func newFakeABSSnapstore() SnapStore { + f := []pipeline.Factory{ + pipeline.MethodFactoryMarker(), + newFakePolicyFactory(bucket, prefix, objectMap), + } + p := pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newFakePolicyFactory(bucket, prefix, objectMap)}) + u, err := url.Parse(fmt.Sprintf("https://%s.%s", "dummyaccount", AzureBlobStorageHostName)) + Expect(err).ShouldNot(HaveOccurred()) + serviceURL := azblob.NewServiceURL(*u, p) + containerURL := serviceURL.NewContainerURL(bucket) + a, err := GetABSSnapstoreFromClient(bucket, prefix, "/tmp", 5, &containerURL) + Expect(err).ShouldNot(HaveOccurred()) + return a +} + +// Please follow the link https://github.com/Azure/azure-pipeline-go/blob/master/pipeline/policies_test.go +// for details about Azure policies, policy factories, and pipelines. + +// newFakePolicyFactory creates a 'Fake' policy factory. +func newFakePolicyFactory(bucket, prefix string, objectMap map[string]*[]byte) pipeline.Factory { + return &fakePolicyFactory{bucket, prefix, objectMap} +} + +type fakePolicyFactory struct { + bucket string + prefix string + objectMap map[string]*[]byte +} + +// New initializes a Fake policy object. +func (f *fakePolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return &fakePolicy{ + next: next, + po: po, + bucket: f.bucket, + prefix: f.prefix, + objectMap: f.objectMap, + multiPartUploads: make(map[string]map[string][]byte, 0), + } +} + +type fakePolicy struct { + next pipeline.Policy + po *pipeline.PolicyOptions + bucket string + prefix string + objectMap map[string]*[]byte + multiPartUploads map[string]map[string][]byte + multiPartUploadsMutex sync.Mutex +} + +// Do is called on the pipeline to process the request. It internally calls the `Do` method +// of the next policy in the pipeline and returns the response from it. +func (p *fakePolicy) Do(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { + httpReq, err := http.NewRequest(request.Method, request.URL.String(), request.Body) + if err != nil { + return nil, err + } + httpReq.ContentLength = request.ContentLength + + httpResp := &http.Response{ + Request: httpReq, + } + switch request.Method { + case "GET": + object := parseObjectNamefromURL(request.URL) + if len(object) == 0 { + if err := p.handleContainerGetOperation(httpResp); err != nil { + return nil, err + } + } else { + p.handleBlobGetOperation(httpResp) + } + case "PUT": + p.handleBlobPutOperation(httpResp) + case "DELETE": + p.handleDeleteObject(httpResp) + default: + return nil, fmt.Errorf("unhandled HTTP method %s", request.Method) + } + + return pipeline.NewHTTPResponse(httpResp), nil +} + +// handleContainerGetOperation prepares response for Get operation on container. 
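+// (Editor's summary, not part of the original change: the fake policy maps +// Azure Blob REST calls onto the in-memory objectMap. A GET on the bare +// container URL with no "comp" query parameter answers the GetProperties +// existence check, "comp=list" returns a flat blob listing, a PUT with +// "comp=block" stages a chunk under its block ID, a PUT with "comp=blocklist" +// concatenates the staged blocks into objectMap, and DELETE removes the object.) 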
+func (p *fakePolicy) handleContainerGetOperation(w *http.Response) error { + query := w.Request.URL.Query() + comp := query.Get("comp") + switch comp { + case "": + w.StatusCode = http.StatusOK + w.Body = http.NoBody + return nil + case "list": + return p.handleListObjects(w) + default: + return nil + } +} + +// handleListObjects responds with a blob `List` response. +func (p *fakePolicy) handleListObjects(w *http.Response) error { + var ( + keys []string + blobs []blobItem + query = w.Request.URL.Query() + prefix = query.Get("prefix") + marker = query.Get("marker") + maxResultsStr = query.Get("maxresults") + limit = 1 // Azure's actual limit is 5000 + nextMarker string + ) + if len(maxResultsStr) != 0 { + maxResult, err := strconv.Atoi(maxResultsStr) + if err != nil { + return err + } + limit = maxResult + } + + for k := range p.objectMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + if strings.Compare(key, marker) > 0 { + blob := blobItem{ + Name: key, + } + blobs = append(blobs, blob) + if len(blobs) == limit { + nextMarker = key + break + } + } + } + + listBlobsFlatSegmentResponse := listBlobsFlatSegmentResponse{ + ServiceEndpoint: fmt.Sprintf("%s://%s", w.Request.URL.Scheme, w.Request.URL.Host), + ContainerName: w.Request.URL.EscapedPath(), + Prefix: prefix, + Marker: marker, + Segment: blobFlatListSegment{ + BlobItems: blobs, + }, + MaxResults: int32(limit), + NextMarker: nextMarker, + } + + rawXML, err := xml.MarshalIndent(listBlobsFlatSegmentResponse, "", "") + if err != nil { + return err + } + + w.Body = ioutil.NopCloser(strings.NewReader(xml.Header + string(rawXML))) + w.StatusCode = http.StatusOK + return nil +} + +// handleBlobPutOperation responds with a blob `Put` response. +func (p *fakePolicy) handleBlobPutOperation(w *http.Response) { + var ( + query = w.Request.URL.Query() + comp = query.Get("comp") + blockid = query.Get("blockid") + key = parseObjectNamefromURL(w.Request.URL) + ) + + switch comp { + case "block": + content, err := ioutil.ReadAll(w.Request.Body) + if err != nil { + w.StatusCode = http.StatusBadRequest + w.Body = ioutil.NopCloser(strings.NewReader(fmt.Sprintf("failed to read content %v", err))) + return + } + + p.multiPartUploadsMutex.Lock() + blockList, ok := p.multiPartUploads[key] + if !ok { + blockList = make(map[string][]byte, 0) + } + blockList[blockid] = content + p.multiPartUploads[key] = blockList + p.multiPartUploadsMutex.Unlock() + w.StatusCode = http.StatusCreated + + case "blocklist": + content, err := ioutil.ReadAll(w.Request.Body) + if err != nil { + w.StatusCode = http.StatusBadRequest + w.Body = ioutil.NopCloser(strings.NewReader(fmt.Sprintf("failed to read content %v", err))) + return + } + blockLookupXML := strings.TrimPrefix(string(content), xml.Header) + var blockLookupList azblob.BlockLookupList + if err := xml.Unmarshal([]byte(blockLookupXML), &blockLookupList); err != nil { + w.StatusCode = http.StatusBadRequest + w.Body = ioutil.NopCloser(strings.NewReader(fmt.Sprintf("failed to parse body %v", err))) + return + } + blockContentMap := p.multiPartUploads[key] + content = make([]byte, 0) + for _, blockID := range blockLookupList.Latest { + content = append(content, blockContentMap[blockID]...) + } + p.objectMap[key] = &content + w.StatusCode = http.StatusCreated + } + w.Body = http.NoBody +} + +// handleBlobGetOperation on GET request `/testContainer/testObject` responds with a `Get` response. 
+func (p *fakePolicy) handleBlobGetOperation(w *http.Response) { + key := parseObjectNamefromURL(w.Request.URL) + if _, ok := p.objectMap[key]; ok { + w.StatusCode = http.StatusOK + w.Body = ioutil.NopCloser(bytes.NewReader(*p.objectMap[key])) + } else { + w.StatusCode = http.StatusNotFound + w.Body = http.NoBody + } +} + +// handleDeleteObject on delete request `/testContainer/testObject` responds with a `Delete` response. +func (p *fakePolicy) handleDeleteObject(w *http.Response) { + key := parseObjectNamefromURL(w.Request.URL) + if _, ok := p.objectMap[key]; ok { + delete(p.objectMap, key) + w.StatusCode = http.StatusAccepted + } else { + w.StatusCode = http.StatusNotFound + } + w.Body = http.NoBody +} + +///////////////////////////////////////////// +// Truncated azblob types for XML encoding // +///////////////////////////////////////////// + +type blobItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + Properties azblob.BlobProperties `xml:"Properties"` +} + +// listBlobsFlatSegmentResponse - An enumeration of blobs +type listBlobsFlatSegmentResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + Segment blobFlatListSegment `xml:"Blobs"` + NextMarker string `xml:"NextMarker"` +} + +// blobFlatListSegment ... +type blobFlatListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobItems []blobItem `xml:"Blob"` +} diff --git a/pkg/snapstore/s3_snapstore_test.go b/pkg/snapstore/s3_snapstore_test.go index 505bf0b8b..14b0c3e17 100644 --- a/pkg/snapstore/s3_snapstore_test.go +++ b/pkg/snapstore/s3_snapstore_test.go @@ -73,20 +73,21 @@ func (m *mockS3Client) CreateMultipartUploadWithContext(ctx aws.Context, in *s3. } func (m *mockS3Client) UploadPartWithContext(ctx aws.Context, in *s3.UploadPartInput, opts ...request.Option) (*s3.UploadPartOutput, error) { - if m.multiPartUploads[*in.UploadId] == nil { - return nil, fmt.Errorf("multipart upload not initiated") - } if *in.PartNumber < 0 { return nil, fmt.Errorf("part number should be positive integer") } + m.multiPartUploadsMutex.Lock() + if m.multiPartUploads[*in.UploadId] == nil { + m.multiPartUploadsMutex.Unlock() + return nil, fmt.Errorf("multipart upload not initiated") + } if *in.PartNumber > int64(len(*m.multiPartUploads[*in.UploadId])) { - m.multiPartUploadsMutex.Lock() t := make([][]byte, *in.PartNumber) copy(t, *m.multiPartUploads[*in.UploadId]) delete(m.multiPartUploads, *in.UploadId) m.multiPartUploads[*in.UploadId] = &t - m.multiPartUploadsMutex.Unlock() } + m.multiPartUploadsMutex.Unlock() temp, err := ioutil.ReadAll(in.Body) if err != nil { return nil, fmt.Errorf("failed to read complete body %v", err) diff --git a/pkg/snapstore/snapstore_test.go b/pkg/snapstore/snapstore_test.go index d0dbfc830..8fc0f45fc 100644 --- a/pkg/snapstore/snapstore_test.go +++ b/pkg/snapstore/snapstore_test.go @@ -16,16 +16,18 @@ package snapstore_test import ( "bytes" + "fmt" "io/ioutil" + "net/url" "path" + "strings" "time" - "github.com/sirupsen/logrus" - . "github.com/gardener/etcd-backup-restore/pkg/snapstore" fake "github.com/gophercloud/gophercloud/testhelper/client" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/sirupsen/logrus" ) var ( @@ -80,6 +82,7 @@ var _ = Describe("Snapstore", func() { multiPartUploads: map[string]*[][]byte{}, }), "swift": NewSwiftSnapstoreFromClient(bucket, prefix, "/tmp", 5, fake.ServiceClient()), + "ABS": newFakeABSSnapstore(), } }) @@ -148,3 +151,17 @@ func resetObjectMap() { delete(objectMap, k) } } + +func parseObjectNamefromURL(u *url.URL) string { + path := u.EscapedPath() + if strings.HasPrefix(path, fmt.Sprintf("/%s", bucket)) { + splits := strings.SplitAfterN(path, fmt.Sprintf("/%s", bucket), 2) + if len(splits[1]) == 0 { + return "" + } + return splits[1][1:] + } else { + logrus.Errorf("path should start with /%s: but received %s", bucket, u.String()) + return "" + } +} diff --git a/pkg/snapstore/swift_snapstore_test.go b/pkg/snapstore/swift_snapstore_test.go index 7925b79b8..929de723f 100644 --- a/pkg/snapstore/swift_snapstore_test.go +++ b/pkg/snapstore/swift_snapstore_test.go @@ -20,7 +20,6 @@ import ( "io" "io/ioutil" "net/http" - "net/url" "sort" "strings" "sync" @@ -59,20 +58,6 @@ func initializeMockSwiftServer(t *testing.T) { }) } -func parseObjectNamefromURL(u *url.URL) string { - path := u.EscapedPath() - if strings.HasPrefix(path, fmt.Sprintf("/%s", bucket)) { - splits := strings.SplitAfterN(path, fmt.Sprintf("/%s", bucket), 2) - if len(splits[1]) == 0 { - return "" - } - return splits[1][1:] - } else { - fmt.Printf("path should start with /%s", bucket) - return "" - } -} - // handleCreateTextObject creates an HTTP handler at `/testContainer/testObject` on the test handler mux // that responds with a `Create` response. func handleCreateTextObject(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/snapstore/types.go b/pkg/snapstore/types.go index d874c85c0..f83aad7c8 100644 --- a/pkg/snapstore/types.go +++ b/pkg/snapstore/types.go @@ -24,45 +24,52 @@ import ( // Only purpose of these implementation to provide CPI layer to // access files. type SnapStore interface { - // Fetch should open reader for the snapshot file from store + // Fetch should open reader for the snapshot file from store. Fetch(Snapshot) (io.ReadCloser, error) - // List will list all snapshot files on store + // List will list all snapshot files on store. List() (SnapList, error) - // Save will write the snapshot to store + // Save will write the snapshot to store. Save(Snapshot, io.Reader) error - // Delete should delete the snapshot file from store + // Delete should delete the snapshot file from store. Delete(Snapshot) error } const ( - // minChunkSize is set to 5Mib since AWS doesn't allow chunk size less than that + // minChunkSize is set to 5Mib since it is lower chunk size limit for AWS. minChunkSize int64 = 5 * (1 << 20) //5 MiB - // SnapstoreProviderLocal is constant for local disk storage provider + // SnapstoreProviderLocal is constant for local disk storage provider. SnapstoreProviderLocal = "Local" - // SnapstoreProviderS3 is constant for aws S3 storage provider + // SnapstoreProviderS3 is constant for aws S3 storage provider. SnapstoreProviderS3 = "S3" - // SnapstoreProviderABS is constant for azure blob storage provider + // SnapstoreProviderABS is constant for azure blob storage provider. SnapstoreProviderABS = "ABS" - // SnapstoreProviderGCS is constant for GCS object storage provider + // SnapstoreProviderGCS is constant for GCS object storage provider. SnapstoreProviderGCS = "GCS" - // SnapstoreProviderSwift is constant for Swift object storage + // SnapstoreProviderSwift is constant for Swift object storage. 
SnapstoreProviderSwift = "Swift" - // SnapshotKindFull is constant for full snapshot kind + // SnapshotKindFull is constant for full snapshot kind. SnapshotKindFull = "Full" - // SnapshotKindDelta is constant for delta snapshot kind + // SnapshotKindDelta is constant for delta snapshot kind. SnapshotKindDelta = "Incr" - // ChunkUploadTimeout is timeout for uploading chunk + // chunkUploadTimeout is timeout for uploading chunk. chunkUploadTimeout = 180 * time.Second + // providerConnectionTimeout is timeout for connection/short queries to cloud provider. + providerConnectionTimeout = 30 * time.Second + // downloadTimeout is timeout for downloading chunk. + downloadTimeout = 5 * time.Minute tmpBackupFilePrefix = "etcd-backup-" // maxRetryAttempts indicates the number of attempts to be retried in case of failure to upload chunk. maxRetryAttempts = 5 + + // AzureBlobStorageHostName is the host name for azure blob storage service. + AzureBlobStorageHostName = "blob.core.windows.net" ) -// Snapshot structure represents the metadata of snapshot +// Snapshot structure represents the metadata of a snapshot. type Snapshot struct { Kind string //incr:incremental,full:full StartRevision int64 @@ -73,7 +80,7 @@ type Snapshot struct { IsChunk bool } -// SnapList is list of snapshots +// SnapList is list of snapshots. type SnapList []*Snapshot // Config defines the configuration to create snapshot store. diff --git a/pkg/snapstore/utils.go b/pkg/snapstore/utils.go index 092eae14d..e71ddfc6d 100644 --- a/pkg/snapstore/utils.go +++ b/pkg/snapstore/utils.go @@ -33,6 +33,21 @@ func GetSnapstore(config *Config) (SnapStore, error) { if config.Container == "" { config.Container = os.Getenv(envStorageContainer) } + + if len(config.TempDir) == 0 { + config.TempDir = path.Join("/tmp") + } + if _, err := os.Stat(config.TempDir); err != nil { + if os.IsNotExist(err) { + logrus.Infof("Temporary directory %s does not exist. Creating it...", config.TempDir) + if err := os.MkdirAll(config.TempDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create temporary directory %s: %v", config.TempDir, err) + } + } else { + return nil, fmt.Errorf("failed to get file info of temporary directory %s: %v", config.TempDir, err) + } + } + if config.MaxParallelChunkUploads <= 0 { config.MaxParallelChunkUploads = 5 } diff --git a/vendor/github.com/Azure/azure-pipeline-go/LICENSE b/vendor/github.com/Azure/azure-pipeline-go/LICENSE new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go new file mode 100755 index 000000000..0dde81d72 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go @@ -0,0 +1,255 @@ +package pipeline + +import ( + "context" + "net" + "net/http" + "os" + "time" +) + +// The Factory interface represents an object that can create its Policy object. Each HTTP request sent +// requires that this Factory create a new instance of its Policy object. +type Factory interface { + New(next Policy, po *PolicyOptions) Policy +} + +// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface. +type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc + +// New calls f(next,po). +func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy { + return f(next, po) +} + +// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process +// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned +// Response goes backward through the linked-list for additional processing. +// NOTE: Request is passed by value so changes do not change the caller's version of +// the request. However, Request has some fields that reference mutable objects (not strings). +// These references are copied; a deep copy is not performed. Specifically, this means that +// you should avoid modifying the objects referred to by these fields: URL, Header, Body, +// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response. +type Policy interface { + Do(ctx context.Context, request Request) (Response, error) +} + +// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface. +type PolicyFunc func(ctx context.Context, request Request) (Response, error) + +// Do calls f(ctx, request). +func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) { + return f(ctx, request) +} + +// Options configures a Pipeline's behavior. +type Options struct { + HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests. + Log LogOptions +} + +// LogLevel tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LogLevel uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LogLevel = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. 
+ LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +// LogOptions configures the pipeline's logging mechanism & level filtering. +type LogOptions struct { + Log func(level LogLevel, message string) + + // ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not. + // An application can return different values over its lifetime; this allows the application to dynamically + // alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure + // you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone). + // Usually, the function will be implemented simply like this: return level <= LogWarning + ShouldLog func(level LogLevel) bool +} + +type pipeline struct { + factories []Factory + options Options +} + +// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface. +// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest +// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a +// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where +// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects. +// +// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list. +// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network +// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects. +// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests. +type Pipeline interface { + Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) +} + +// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options. +func NewPipeline(factories []Factory, o Options) Pipeline { + if o.HTTPSender == nil { + o.HTTPSender = newDefaultHTTPClientFactory() + } + if o.Log.Log == nil { + o.Log.Log = func(LogLevel, string) {} // No-op logger + } + return &pipeline{factories: factories, options: o} +} + +// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object +// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request +// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and +// ultimately sends the transformed HTTP request over the network. 
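+// +// A hypothetical usage sketch (editor's illustration, not part of the original +// source): with a factory slice containing a MethodFactoryMarker, +// +// p := NewPipeline([]Factory{MethodFactoryMarker()}, Options{}) +// resp, err := p.Do(ctx, nil, req) +// +// where req was built via NewRequest; passing nil for methodFactory simply +// skips the marker when the Policy linked-list is built. 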
+func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) { + response, err := p.newPolicies(methodFactory).Do(ctx, request) + request.close() + return response, err +} + +func (p *pipeline) newPolicies(methodFactory Factory) Policy { + // The last Policy is the one that actually sends the request over the wire and gets the response. + // It is overridable via the Options' HTTPSender field. + po := &PolicyOptions{pipeline: p} // One object shared by all policy objects + next := p.options.HTTPSender.New(nil, po) + + // Walk over the slice of Factory objects in reverse (from wire to API) + markers := 0 + for i := len(p.factories) - 1; i >= 0; i-- { + factory := p.factories[i] + if _, ok := factory.(methodFactoryMarker); ok { + markers++ + if markers > 1 { + panic("MethodFactoryMarker can only appear once in the pipeline") + } + if methodFactory != nil { + // Replace MethodFactoryMarker with passed-in methodFactory + next = methodFactory.New(next, po) + } + } else { + // Use the slice's Factory to construct its Policy + next = factory.New(next, po) + } + } + + // Each Factory has created its Policy + if markers == 0 && methodFactory != nil { + panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline") + } + return next // Return head of the Policy object linked-list +} + +// A PolicyOptions represents optional information that can be used by a node in the +// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method +// which passes it (if desired) to the Policy object it creates. Today, the Policy object +// uses the options to perform logging. But, in the future, this could be used for more. +type PolicyOptions struct { + pipeline *pipeline +} + +// ShouldLog returns true if the specified log level should be logged. +func (po *PolicyOptions) ShouldLog(level LogLevel) bool { + if po.pipeline.options.Log.ShouldLog != nil { + return po.pipeline.options.Log.ShouldLog(level) + } + return false +} + +// Log logs a string to the Pipeline's Logger. +func (po *PolicyOptions) Log(level LogLevel, msg string) { + if !po.ShouldLog(level) { + return // Short circuit message formatting if we're not logging it + } + + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + po.pipeline.options.Log.Log(level, msg) + + // If logger doesn't handle fatal/panic, we'll do it here. + if level == LogFatal { + os.Exit(1) + } else if level == LogPanic { + panic(msg) + } +} + +var pipelineHTTPClient = newDefaultHTTPClient() + +func newDefaultHTTPClient() *http.Client { + // We want the Transport to have a large connection pool + return &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + // We use Dial instead of DialContext as DialContext has been reported to cause slower performance. + Dial /*Context*/ : (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, /*Context*/ + MaxIdleConns: 0, // No limit + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: false, + DisableCompression: false, + MaxResponseHeaderBytes: 0, + //ResponseHeaderTimeout: time.Duration{}, + //ExpectContinueTimeout: time.Duration{}, + }, + } +} + +// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to Go's default http.Client. 
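+// (Editor's note: the factory below closes over the package-level +// pipelineHTTPClient defined above, so every pipeline built without an explicit +// HTTPSender shares one connection pool; transport errors are wrapped with +// NewError before flowing back up the policy chain.) 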
+func newDefaultHTTPClientFactory() Factory { + return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc { + return func(ctx context.Context, request Request) (Response, error) { + r, err := pipelineHTTPClient.Do(request.WithContext(ctx)) + if err != nil { + err = NewError(err, "HTTP request failed") + } + return NewHTTPResponse(r), err + } + }) +} + +var mfm = methodFactoryMarker{} // Singleton + +// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any +// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's +// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created. +func MethodFactoryMarker() Factory { + return mfm +} + +type methodFactoryMarker struct { +} + +func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy { + panic("methodFactoryMarker policy should have been replaced with a method policy") +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go new file mode 100755 index 000000000..d0bb77407 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go @@ -0,0 +1,33 @@ +// +build !windows,!nacl,!plan9 + +package pipeline + +import ( + "log" + "log/syslog" +) + +// ForceLog should rarely be used. It forcibly logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func ForceLog(level LogLevel, msg string) { + if defaultLogger == nil { + return // Return fast if we failed to create the logger. + } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + switch level { + case LogFatal: + defaultLogger.Fatal(msg) + case LogPanic: + defaultLogger.Panic(msg) + case LogError, LogWarning, LogInfo: + defaultLogger.Print(msg) + } +} + +var defaultLogger = func() *log.Logger { + l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags) + return l +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go new file mode 100755 index 000000000..cb6739899 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go @@ -0,0 +1,61 @@ +package pipeline + +import ( + "os" + "syscall" + "unsafe" +) + +// ForceLog should rarely be used. 
It forcibly logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func ForceLog(level LogLevel, msg string) { + var el eventType + switch level { + case LogError, LogFatal, LogPanic: + el = elError + case LogWarning: + el = elWarning + case LogInfo: + el = elInfo + } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + reportEvent(el, 0, msg) +} + +type eventType int16 + +const ( + elSuccess eventType = 0 + elError eventType = 1 + elWarning eventType = 2 + elInfo eventType = 4 +) + +var reportEvent = func() func(eventType eventType, eventID int32, msg string) { + advAPI32 := syscall.MustLoadDLL("AdvAPI32.dll") + registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW") + + sourceName, _ := os.Executable() + sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName) + handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16))) + if lastErr == nil { // On error, logging is a no-op + return func(eventType eventType, eventID int32, msg string) {} + } + reportEvent := advAPI32.MustFindProc("ReportEventW") + return func(eventType eventType, eventID int32, msg string) { + s, _ := syscall.UTF16PtrFromString(msg) + _, _, _ = reportEvent.Call( + uintptr(handle), // HANDLE hEventLog + uintptr(eventType), // WORD wType + uintptr(0), // WORD wCategory + uintptr(eventID), // DWORD dwEventID + uintptr(0), // PSID lpUserSid + uintptr(1), // WORD wNumStrings + uintptr(0), // DWORD dwDataSize + uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings + uintptr(0)) // LPVOID lpRawData + } +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go new file mode 100755 index 000000000..b5ab05f4d --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go @@ -0,0 +1,161 @@ +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package pipeline implements an HTTP request/response middleware pipeline whose +policy objects mutate an HTTP request's URL, query parameters, and/or headers before +the request is sent over the wire. + +Not all policy objects mutate an HTTP request; some policy objects simply impact the +flow of requests/responses by performing operations such as logging, retry policies, +timeouts, failure injection, and deserialization of response payloads. + +Implementing the Policy Interface + +To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do +method is called when an HTTP request wants to be sent over the network. Your Do method can perform any +operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query +parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object +in the linked-list, ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy +object sends the HTTP request over the network (by calling the HTTPSender's Do method). + +When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response +(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure +or timeout, deserialize the response body, etc. 
Ultimately, the last Policy object returns the HTTP response +to the code that initiated the original HTTP request. + +Here is a template for how to define a pipeline.Policy object: + + type myPolicy struct { + node PolicyNode + // TODO: Add configuration/setting fields here (if desired)... + } + + func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // TODO: Mutate/process the HTTP request here... + response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response + // TODO: Mutate/process the HTTP response here... + return response, err // Return response/error to previous Policy + } + +Implementing the Factory Interface + +Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New +method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is +passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and +a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object +passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object. + +Here is a template for how to define a pipeline.Factory object: + + // NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable); + // this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently. + type myPolicyFactory struct { + // TODO: Add any configuration/setting fields if desired... + } + + func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy { + return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)... + } + +Using your Factory and Policy objects via a Pipeline + +To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes +this slice to the pipeline.NewPipeline function. + + func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline + +This function also requires an object implementing the HTTPSender interface. For simple scenarios, +passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually +send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender +object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects +or other objects that can simulate the network requests for testing purposes. + +Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple +wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with a +context.Context for cancelling the HTTP request (if desired). + + type Pipeline interface { + Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error) + } + +Do iterates over the slice of Factory objects and tells each one to create its corresponding +Policy object. After the linked-list of Policy objects has been created, Do calls the first +Policy object passing it the Context & HTTP request parameters. These parameters now flow through +all the Policy objects giving each object a chance to look at and/or mutate the HTTP request. +The last Policy object sends the message over the network. 
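+ +For example (an editor's illustration, not part of the original comment), given a +Pipeline p and a Request req built as described above: + + resp, err := p.Do(ctx, nil, req) + if err == nil { + fmt.Println(resp.Response().StatusCode) + } 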
+ +When the network operation completes, the HTTP response and error return values pass +back through the same Policy objects in reverse order. Most Policy objects ignore the +response/error but some log the result, retry the operation (depending on the exact +reason the operation failed), or deserialize the response's body. Your own Policy +objects can do whatever they like when processing outgoing requests or incoming responses. + +Note that after an I/O request runs to completion, the Policy objects for that request +are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe, allowing +them to be created once and reused over many I/O operations. This allows for efficient use of +memory and also makes them safely usable by multiple goroutines concurrently. + +Inserting a Method-Specific Factory into the Linked-List of Policy Objects + +While Pipeline and Factory objects can be reused over many different operations, it is +common to have special behavior for a specific operation/method. For example, a method +may need to deserialize the response's body to an instance of a specific data type. +To accommodate this, the Pipeline's Do method takes an additional method-specific +Factory object. The Do method tells this Factory to create a Policy object and +injects this method-specific Policy object into the linked-list of Policy objects. + +When creating a Pipeline object, the slice of Factory objects passed must have 1 +(and only 1) entry marking where the method-specific Factory should be injected. +The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function: + + func MethodFactoryMarker() pipeline.Factory + +Creating an HTTP Request Object + +The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct. +Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard +http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function: + + func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error) + +To this function, you must pass a pipeline.RequestOptions that looks like this: + + type RequestOptions struct { + // The readable and seekable stream to be sent to the server as the request's body. + Body io.ReadSeeker + + // The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request. + Progress ProgressReceiver + } + +The method and struct ensure that the request's body stream is a read/seekable stream. +A seekable stream is required so that upon retry, the final Policy object can seek +the stream back to the beginning before retrying the network request and re-uploading the +body. In addition, you can associate a ProgressReceiver callback function which will be +invoked periodically to report progress while bytes are being read from the body stream +and sent over the network. + +Processing the HTTP Response + +When an HTTP response comes in from the network, a reference to Go's http.Response struct is +embedded in a struct that implements the pipeline.Response interface: + + type Response interface { + Response() *http.Response + } + +This interface is returned through all the Policy objects. Each Policy object can call the Response +interface's Response method to examine (or mutate) the embedded http.Response object. 
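+ +As a hedged sketch (editor's addition, not from the original), the myPolicy template +shown earlier could examine the response this way inside its Do method: + + response, err := p.node.Do(ctx, request) + if response != nil && response.Response().StatusCode >= 500 { + // e.g. decide here whether to log the failure or retry the request + } + return response, err 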
+
+A Policy object can internally define another struct (implementing the pipeline.Response interface)
+that embeds an http.Response, adds additional fields, and returns this structure to other Policy
+objects. This allows a Policy object to deserialize the body to some other struct and return the
+original http.Response and the additional struct back through the Policy chain. Other Policy objects
+can see the Response but cannot see the additional struct with the deserialized body. After all the
+Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
+The caller of this method can perform a type assertion to get back to the struct type actually
+returned by the Policy object. If the type assertion is successful, the caller now has
+access to both the http.Response and the deserialized struct object.*/
+package pipeline
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
new file mode 100755
index 000000000..fd008364d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
@@ -0,0 +1,121 @@
+package pipeline
+
+import (
+   "fmt"
+   "runtime"
+)
+
+type causer interface {
+   Cause() error
+}
+
+// ErrorNode can be an embedded field in a private error object. This field
+// adds Program Counter support and a 'cause' (reference to a preceding error).
+// When initializing an error type with this embedded field, initialize the
+// ErrorNode field by calling ErrorNode{}.Initialize(cause).
+type ErrorNode struct {
+   pc    uintptr // Represents a Program Counter that you can get symbols for.
+   cause error   // Refers to the preceding error (or nil)
+}
+
+// Error returns the specified message prefixed with the PC's symbols (the
+// prefix is omitted if the PC is invalid). When defining a new error type,
+// have its Error method call this one, passing it the string representation
+// of the error.
+func (e *ErrorNode) Error(msg string) string {
+   s := ""
+   if fn := runtime.FuncForPC(e.pc); fn != nil {
+      file, line := fn.FileLine(e.pc)
+      s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
+   }
+   s += msg + "\n\n"
+   if e.cause != nil {
+      s += e.cause.Error() + "\n"
+   }
+   return s
+}
+
+// Cause returns the error that preceded this error.
+func (e *ErrorNode) Cause() error { return e.cause }
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNode) Temporary() bool {
+   type temporary interface {
+      Temporary() bool
+   }
+
+   for err := e.cause; err != nil; {
+      if t, ok := err.(temporary); ok {
+         return t.Temporary()
+      }
+
+      if cause, ok := err.(causer); ok {
+         err = cause.Cause()
+      } else {
+         err = nil
+      }
+   }
+   return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
+func (e ErrorNode) Timeout() bool {
+   type timeout interface {
+      Timeout() bool
+   }
+
+   for err := e.cause; err != nil; {
+      if t, ok := err.(timeout); ok {
+         return t.Timeout()
+      }
+
+      if cause, ok := err.(causer); ok {
+         err = cause.Cause()
+      } else {
+         err = nil
+      }
+   }
+   return false
+}
+
+// Initialize is used to initialize an embedded ErrorNode field.
+// It captures the caller's program counter and saves the cause (preceding error).
+// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
+// value of 3 is very common but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
+   // Get the PC of Initialize method's caller.
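+   // (runtime.Callers treats skip==0 as the frame of runtime.Callers itself, so
+   // when Initialize is invoked via a constructor such as NewError below,
+   // callersToSkip==3 captures the PC of that constructor's caller.)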
+   pc := [1]uintptr{}
+   _ = runtime.Callers(callersToSkip, pc[:])
+   return ErrorNode{pc: pc[0], cause: cause}
+}
+
+// Cause walks all the preceding errors and returns the originating error.
+func Cause(err error) error {
+   for err != nil {
+      cause, ok := err.(causer)
+      if !ok {
+         break
+      }
+      err = cause.Cause()
+   }
+   return err
+}
+
+// NewError creates a simple string error (like errors.New), but this
+// error also captures the caller's Program Counter and the preceding error.
+func NewError(cause error, msg string) error {
+   return &pcError{
+      ErrorNode: ErrorNode{}.Initialize(cause, 3),
+      msg:       msg,
+   }
+}
+
+// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
+type pcError struct {
+   ErrorNode
+   msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter
+// symbols and calls Error on the preceding error so you can see the full error chain.
+func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
new file mode 100755
index 000000000..efa3c8ed0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
@@ -0,0 +1,82 @@
+package pipeline
+
+import "io"
+
+// ********** The following is common between the request body AND the response body.
+
+// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
+type ProgressReceiver func(bytesTransferred int64)
+
+// ********** The following are specific to the request body (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type requestBodyProgress struct {
+   requestBody io.ReadSeeker // Seeking is required to support retries
+   pr          ProgressReceiver
+}
+
+// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
+   if pr == nil {
+      panic("pr must not be nil")
+   }
+   return &requestBodyProgress{requestBody: requestBody, pr: pr}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
+   n, err = rbp.requestBody.Read(p)
+   if err != nil {
+      return
+   }
+   // Invokes the user's callback method to report progress
+   position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
+   if err != nil {
+      panic(err)
+   }
+   rbp.pr(position)
+   return
+}
+
+func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+   return rbp.requestBody.Seek(offset, whence)
+}
+
+// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
+func (rbp *requestBodyProgress) Close() error {
+   if c, ok := rbp.requestBody.(io.Closer); ok {
+      return c.Close()
+   }
+   return nil
+}
+
+// ********** The following are specific to the response body (a ReadCloser)
+
+// This struct is used when receiving a body from the network
+type responseBodyProgress struct {
+   responseBody io.ReadCloser
+   pr           ProgressReceiver
+   offset       int64
+}
+
+// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
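+// A typical use (as a sketch) is to wrap a response body before reading it, so that
+// reads report progress, e.g.: r.Response().Body = NewResponseBodyProgress(r.Response().Body, pr)
+// where pr is a caller-supplied ProgressReceiver.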
+func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
+   if pr == nil {
+      panic("pr must not be nil")
+   }
+   return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
+   n, err = rbp.responseBody.Read(p)
+   rbp.offset += int64(n)
+
+   // Invokes the user's callback method to report progress
+   rbp.pr(rbp.offset)
+   return
+}
+
+func (rbp *responseBodyProgress) Close() error {
+   return rbp.responseBody.Close()
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
new file mode 100755
index 000000000..1fbe72bd4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
@@ -0,0 +1,147 @@
+package pipeline
+
+import (
+   "io"
+   "net/http"
+   "net/url"
+   "strconv"
+)
+
+// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
+type Request struct {
+   *http.Request
+}
+
+// NewRequest initializes a new HTTP request object with the given method, URL, and (optional) body.
+func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
+   // Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
+
+   // This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
+   request.Request = &http.Request{
+      Method:     method,
+      URL:        &url,
+      Proto:      "HTTP/1.1",
+      ProtoMajor: 1,
+      ProtoMinor: 1,
+      Header:     make(http.Header),
+      Host:       url.Host,
+   }
+
+   if body != nil {
+      err = request.SetBody(body)
+   }
+   return
+}
+
+// SetBody sets the body and content length; it assumes body is not nil.
+func (r Request) SetBody(body io.ReadSeeker) error {
+   size, err := body.Seek(0, io.SeekEnd)
+   if err != nil {
+      return err
+   }
+
+   body.Seek(0, io.SeekStart)
+   r.ContentLength = size
+   r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
+
+   if size != 0 {
+      r.Body = &retryableRequestBody{body: body}
+      r.GetBody = func() (io.ReadCloser, error) {
+         _, err := body.Seek(0, io.SeekStart)
+         if err != nil {
+            return nil, err
+         }
+         return r.Body, nil
+      }
+   } else {
+      // in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
+      r.Body = http.NoBody
+      r.GetBody = func() (io.ReadCloser, error) {
+         return http.NoBody, nil
+      }
+
+      // close the user-provided empty body
+      if c, ok := body.(io.Closer); ok {
+         c.Close()
+      }
+   }
+
+   return nil
+}
+
+// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
+// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
+// RemoteAddr, and RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
+// Cancel, Response, and ctx fields. Copy panics if any of these fields are
+// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
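+// (A retry Policy, for example, might combine Copy with RewindBody, defined below,
+// to safely reissue a request after a transient failure.)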
+func (r Request) Copy() Request {
+   if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
+      panic("Can't make a deep copy of the http.Request because at least one of the following is not nil: " +
+         "TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
+   }
+   copy := *r.Request          // Copy the request
+   urlCopy := *(r.Request.URL) // Copy the URL
+   copy.URL = &urlCopy
+   copy.Header = http.Header{} // Copy the header
+   for k, vs := range r.Header {
+      for _, value := range vs {
+         copy.Header.Add(k, value)
+      }
+   }
+   return Request{Request: &copy} // Return the copy
+}
+
+func (r Request) close() error {
+   if r.Body != nil && r.Body != http.NoBody {
+      c, ok := r.Body.(*retryableRequestBody)
+      if !ok {
+         panic("unexpected request body type (should be *retryableRequestBody)")
+      }
+      return c.realClose()
+   }
+   return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
+func (r Request) RewindBody() error {
+   if r.Body != nil && r.Body != http.NoBody {
+      s, ok := r.Body.(io.Seeker)
+      if !ok {
+         panic("unexpected request body type (should be io.Seeker)")
+      }
+
+      // Reset the stream back to the beginning
+      _, err := s.Seek(0, io.SeekStart)
+      return err
+   }
+   return nil
+}
+
+// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type retryableRequestBody struct {
+   body io.ReadSeeker // Seeking is required to support retries
+}
+
+// Read reads a block of data from the inner stream
+func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
+   return b.body.Read(p)
+}
+
+func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+   return b.body.Seek(offset, whence)
+}
+
+func (b *retryableRequestBody) Close() error {
+   // We don't want the underlying transport to close the request body on transient failures so this is a nop.
+   // The pipeline closes the request body upon success.
+   return nil
+}
+
+func (b *retryableRequestBody) realClose() error {
+   if c, ok := b.body.(io.Closer); ok {
+      return c.Close()
+   }
+   return nil
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
new file mode 100755
index 000000000..f2dc16482
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
@@ -0,0 +1,74 @@
+package pipeline
+
+import (
+   "bytes"
+   "fmt"
+   "net/http"
+   "sort"
+   "strings"
+)
+
+// The Response interface exposes an http.Response object as it is returned through the pipeline of Policy objects.
+// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
+// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
+// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
+// to the expected struct and returns the struct to its caller.
+type Response interface {
+   Response() *http.Response
+}
+
+// This is the default struct that has the http.Response.
+// A method can replace this struct with its own struct containing an http.Response
+// field and any other additional fields.
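+// For example (a hypothetical method-specific response type):
+//
+//   type itemResponse struct {
+//      raw  *http.Response
+//      item Item // deserialized from the response body
+//   }
+//
+//   func (ir itemResponse) Response() *http.Response { return ir.raw }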
+type httpResponse struct {
+   response *http.Response
+}
+
+// NewHTTPResponse is typically called by a Policy object to return a Response object.
+func NewHTTPResponse(response *http.Response) Response {
+   return &httpResponse{response: response}
+}
+
+// This method satisfies the public Response interface's Response method
+func (r httpResponse) Response() *http.Response {
+   return r.response
+}
+
+// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
+// not nil, then these are also written into the Buffer.
+func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
+   // Write the request into the buffer.
+   fmt.Fprint(b, "   "+request.Method+" "+request.URL.String()+"\n")
+   writeHeader(b, request.Header)
+   if response != nil {
+      fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+      fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n")
+      writeHeader(b, response.Header)
+   }
+   if err != nil {
+      fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+      fmt.Fprint(b, "   ERROR:\n"+err.Error()+"\n")
+   }
+}
+
+// writeHeader appends an HTTP request's or response's headers into a Buffer.
+func writeHeader(b *bytes.Buffer, header map[string][]string) {
+   if len(header) == 0 {
+      b.WriteString("      (no headers)\n")
+      return
+   }
+   keys := make([]string, 0, len(header))
+   // Alphabetize the headers
+   for k := range header {
+      keys = append(keys, k)
+   }
+   sort.Strings(keys)
+   for _, k := range keys {
+      // Redact the value of any Authorization header to prevent security information from persisting in logs
+      value := interface{}("REDACTED")
+      if !strings.EqualFold(k, "Authorization") {
+         value = header[k]
+      }
+      fmt.Fprintf(b, "      %s: %+v\n", k, value)
+   }
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
new file mode 100644
index 000000000..c4bb62d81
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
@@ -0,0 +1,9 @@
+package pipeline
+
+const (
+   // UserAgent is the string to be used in the user agent string when making requests.
+   UserAgent = "azure-pipeline-go/" + Version
+
+   // Version is the semantic version (see http://semver.org) of the pipeline package.
+   Version = "0.1.0"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
deleted file mode 100644
index af39a91e7..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity.
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE deleted file mode 100644 index 2d1d72608..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Microsoft Azure-SDK-for-Go -Copyright 2014-2017 Microsoft - -This product includes software developed at -the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go deleted file mode 100644 index 8b5b96d48..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ /dev/null @@ -1,91 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "time" -) - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} - -// AppendBlockOptions includes the options for an append block operation -type AppendBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - MaxSize *uint `header:"x-ms-blob-condition-maxsize"` - AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` - ContentMD5 bool -} - -// AppendBlock appends a block to an append blob. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block -func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { - params := url.Values{"comp": {"appendblock"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.ContentMD5 { - md5sum := md5.Sum(chunk) - headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go deleted file mode 100644 index 76794c305..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ /dev/null @@ -1,246 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "fmt" - "net/url" - "sort" - "strings" -) - -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services - -type authentication string - -const ( - sharedKey authentication = "sharedKey" - sharedKeyForTable authentication = "sharedKeyTable" - sharedKeyLite authentication = "sharedKeyLite" - sharedKeyLiteForTable authentication = "sharedKeyLiteTable" - - // headers - headerAcceptCharset = "Accept-Charset" - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerDataServiceVersion = "DataServiceVersion" - headerMaxDataServiceVersion = "MaxDataServiceVersion" - headerContentTransferEncoding = "Content-Transfer-Encoding" -) - -func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - if !c.sasClient { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err - } - headers[headerAuthorization] = authHeader - } - return headers, nil -} - -func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth, false) - if err != nil { - return "", err - } - - canString, err := buildCanonicalizedString(verb, headers, canRes, auth) - if err != nil { - return "", err - } - return c.createAuthorizationHeader(canString, auth), nil -} - -func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("") - if c.accountName != StorageEmulatorAccountName || !sas { - cr.WriteString("/") - cr.WriteString(c.getCanonicalizedAccountName()) - } - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if auth == sharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func (c *Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { - contentLength := headers[headerContentLength] - if contentLength == "0" { - contentLength = "" - } - date := headers[headerDate] - if v, ok := headers[headerXmsDate]; ok { - if auth == sharedKey || auth == sharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch auth { - case sharedKey: - canString = strings.Join([]string{ - verb, - headers[headerContentEncoding], - headers[headerContentLanguage], - contentLength, - headers[headerContentMD5], - headers[headerContentType], - date, - headers[headerIfModifiedSince], - headers[headerIfMatch], - headers[headerIfNoneMatch], - headers[headerIfUnmodifiedSince], - headers[headerRange], - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - canonicalizedResource, - }, "\n") - case sharedKeyLite: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("%s authentication is not supported yet", auth) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { - signature := c.computeHmac256(canonicalizedString) - var key string - switch auth { 
- case sharedKey, sharedKeyForTable: - key = "SharedKey" - case sharedKeyLite, sharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go deleted file mode 100644 index 24ee5db6f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ /dev/null @@ -1,632 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// A Blob is an entry in BlobListResponse. -type Blob struct { - Container *Container - Name string `xml:"Name"` - Snapshot time.Time `xml:"Snapshot"` - Properties BlobProperties `xml:"Properties"` - Metadata BlobMetadata `xml:"Metadata"` -} - -// PutBlobOptions includes the options any put blob operation -// (page, block, append) -type PutBlobOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobMetadata is a set of custom name/value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx -type BlobMetadata map[string]string - -type blobMetadataEntries struct { - Entries []blobMetadataEntry `xml:",any"` -} -type blobMetadataEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` -} - -// UnmarshalXML converts the xml:Metadata into Metadata map -func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var entries blobMetadataEntries - if err := d.DecodeElement(&entries, &start); err != nil { - return err - } - for _, entry := range entries.Entries { - if *bm == nil { - *bm = make(BlobMetadata) - } - (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value - } - return nil -} - -// MarshalXML implements the xml.Marshaler interface. It encodes -// metadata name/value pairs as they would appear in an Azure -// ListBlobs response. -func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - entries := make([]blobMetadataEntry, 0, len(bm)) - for k, v := range bm { - entries = append(entries, blobMetadataEntry{ - XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, - Value: v, - }) - } - return enc.EncodeElement(blobMetadataEntries{ - Entries: entries, - }, start) -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. 
-type BlobProperties struct { - LastModified TimeRFC1123 `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` - ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` - CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` - ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` - ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` - BlobType BlobType `xml:"BlobType"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - ServerEncrypted bool `xml:"ServerEncrypted"` - IncrementalCopy bool `xml:"IncrementalCopy"` -} - -// BlobType defines the type of the Azure Blob. -type BlobType string - -// Types of page blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" - BlobTypeAppend BlobType = "AppendBlob" -) - -func (b *Blob) buildPath() string { - return b.Container.buildPath() + "/" + b.Name -} - -// Exists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b *Blob) Exists() (bool, error) { - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) - headers := b.Container.bsc.client.getStandardHeaders() - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetURL gets the canonical URL to the blob with the specified name in the -// specified container. -// This method does not create a publicly accessible URL if the blob or container -// is private and this method does not check if the blob exists. 
-func (b *Blob) GetURL() string { - container := b.Container.Name - if container == "" { - container = "$root" - } - return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) -} - -// GetBlobRangeOptions includes the options for a get blob range operation -type GetBlobRangeOptions struct { - Range *BlobRange - GetRangeContentMD5 bool - *GetBlobOptions -} - -// GetBlobOptions includes the options for a get blob operation -type GetBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobRange represents the bytes range to be get -type BlobRange struct { - Start uint64 - End uint64 -} - -func (br BlobRange) String() string { - if br.End == 0 { - return fmt.Sprintf("bytes=%d-", br.Start) - } - return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) -} - -// Get returns a stream to read the blob. Caller must call both Read and Close() -// to correctly close the underlying connection. -// -// See the GetRange method for use with a Range header. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { - rangeOptions := GetBlobRangeOptions{ - GetBlobOptions: options, - } - resp, err := b.getRange(&rangeOptions) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - if err := b.writeProperties(resp.Header, true); err != nil { - return resp.Body, err - } - return resp.Body, nil -} - -// GetRange reads the specified range of a blob to a stream. The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// Caller must call both Read and Close()// to correctly close the underlying -// connection. 
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { - resp, err := b.getRange(options) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - // Content-Length header should not be updated, as the service returns the range length - // (which is not alwys the full blob length) - if err := b.writeProperties(resp.Header, false); err != nil { - return resp.Body, err - } - return resp.Body, nil -} - -func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - if options.Range != nil { - headers["Range"] = options.Range.String() - if options.GetRangeContentMD5 { - headers["x-ms-range-get-content-md5"] = "true" - } - } - if options.GetBlobOptions != nil { - headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return nil, err - } - return resp, err -} - -// SnapshotOptions includes the options for a snapshot blob operation -type SnapshotOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// CreateSnapshot creates a snapshot for a blob -// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { - params := url.Values{"comp": {"snapshot"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil || resp == nil { - return nil, err - } - defer readAndCloseBody(resp.Body) - - if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return nil, err - } - - snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot")) - if snapshotResponse != "" { - snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) - if err != nil { - return nil, err - } - return &snapshotTimestamp, nil - } - - return nil, errors.New("Snapshot not created") -} - -// GetBlobPropertiesOptions includes the options for a get blob properties operation -type GetBlobPropertiesOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetProperties 
provides various information about the specified blob. -// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - return b.writeProperties(resp.Header, true) -} - -func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { - var err error - - contentLength := b.Properties.ContentLength - if includeContentLen { - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return err - } - } - } - - var sequenceNum int64 - sequenceNumStr := h.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return err - } - } - - lastModified, err := getTimeFromHeaders(h, "Last-Modified") - if err != nil { - return err - } - - copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") - if err != nil { - return err - } - - b.Properties = BlobProperties{ - LastModified: TimeRFC1123(*lastModified), - Etag: h.Get("Etag"), - ContentMD5: h.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: h.Get("Content-Encoding"), - ContentType: h.Get("Content-Type"), - ContentDisposition: h.Get("Content-Disposition"), - CacheControl: h.Get("Cache-Control"), - ContentLanguage: h.Get("Content-Language"), - SequenceNumber: sequenceNum, - CopyCompletionTime: TimeRFC1123(*copyCompletionTime), - CopyStatusDescription: h.Get("x-ms-copy-status-description"), - CopyID: h.Get("x-ms-copy-id"), - CopyProgress: h.Get("x-ms-copy-progress"), - CopySource: h.Get("x-ms-copy-source"), - CopyStatus: h.Get("x-ms-copy-status"), - BlobType: BlobType(h.Get("x-ms-blob-type")), - LeaseStatus: h.Get("x-ms-lease-status"), - LeaseState: h.Get("x-ms-lease-state"), - } - b.writeMetadata(h) - return nil -} - -// SetBlobPropertiesOptions contains various properties of a blob and is an entry -// in SetProperties -type SetBlobPropertiesOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - SequenceNumberAction *SequenceNumberAction - RequestID string `header:"x-ms-client-request-id"` -} - -// SequenceNumberAction defines how the blob's sequence number should be modified -type SequenceNumberAction string - -// Options for sequence number action -const ( - SequenceNumberActionMax SequenceNumberAction = "max" - SequenceNumberActionUpdate SequenceNumberAction = "update" - SequenceNumberActionIncrement SequenceNumberAction = "increment" -) - -// SetProperties replaces the BlobHeaders for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. 
All keys -// are returned in lower case by GetBlobProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties -func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { - params := url.Values{"comp": {"properties"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - if b.Properties.BlobType == BlobTypePage { - headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) - if options != nil && options.SequenceNumberAction != nil { - headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) - if *options.SequenceNumberAction != SequenceNumberActionIncrement { - headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) - } - } - } - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// SetBlobMetadataOptions includes the options for a set blob metadata operation -type SetBlobMetadataOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetMetadata replaces the metadata for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetBlobMetadataOptions includes the options for a get blob metadata operation -type GetBlobMetadataOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetMetadata returns all user-defined metadata for the specified blob. 
-// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - b.writeMetadata(resp.Header) - return nil -} - -func (b *Blob) writeMetadata(h http.Header) { - b.Metadata = BlobMetadata(writeMetadata(h)) -} - -// DeleteBlobOptions includes the options for a delete blob operation -type DeleteBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - DeleteSnapshots *bool - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the given blob from the specified container. -// If the blob does not exists at the time of the Delete Blob operation, it -// returns error. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) Delete(options *DeleteBlobOptions) error { - resp, err := b.delete(options) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the given blob from the specified container If the -// blob is deleted with this call, returns true. Otherwise returns false. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { - resp, err := b.delete(options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.DeleteSnapshots != nil { - if *options.DeleteSnapshots { - headers["x-ms-delete-snapshots"] = "include" - } else { - headers["x-ms-delete-snapshots"] = "only" - } - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) -} - -// helper method to construct the path to either a blob or container -func pathForResource(container, name string) string { - if name != "" { - return fmt.Sprintf("/%s/%s", container, name) - } - return fmt.Sprintf("/%s", container) -} - -func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error { - defer readAndCloseBody(resp.Body) - err := checkRespCode(resp, []int{http.StatusCreated}) - if err != nil { - return err - } - b.Properties.BlobType = bt - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go deleted file mode 100644 index 31894dbfc..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ /dev/null @@ -1,174 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// OverrideHeaders defines overridable response headers in -// a request using a SAS URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type OverrideHeaders struct { - CacheControl string - ContentDisposition string - ContentEncoding string - ContentLanguage string - ContentType string -} - -// BlobSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type BlobSASOptions struct { - BlobServiceSASPermissions - OverrideHeaders - SASOptions -} - -// BlobServiceSASPermissions includes the available permissions for -// blob service SAS URI.
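The options types above combine with the blob GetSASURI method defined just below; an illustrative sketch, with the filename and lifetime made up, that forces a download-friendly Content-Disposition on responses served via the SAS link:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

// readOnlySAS builds a short-lived, HTTPS-only read URL for a blob.
func readOnlySAS(blob *storage.Blob) {
    opts := storage.BlobSASOptions{}
    opts.Read = true // promoted from the embedded BlobServiceSASPermissions
    opts.Expiry = time.Now().Add(15 * time.Minute)
    opts.UseHTTPS = true
    opts.ContentDisposition = "attachment; filename=report.csv"

    uri, err := blob.GetSASURI(opts)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(uri)
}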
-type BlobServiceSASPermissions struct { - Read bool - Add bool - Create bool - Write bool - Delete bool -} - -func (p BlobServiceSASPermissions) buildString() string { - permissions := "" - if p.Read { - permissions += "r" - } - if p.Add { - permissions += "a" - } - if p.Create { - permissions += "c" - } - if p.Write { - permissions += "w" - } - if p.Delete { - permissions += "d" - } - return permissions -} - -// GetSASURI creates a URL to the blob which contains the Shared -// Access Signature with the specified options. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { - uri := b.GetURL() - signedResource := "b" - canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) - if err != nil { - return "", err - } - - permissions := options.BlobServiceSASPermissions.buildString() - return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { - start := "" - if options.Start != (time.Time{}) { - start = options.Start.UTC().Format(time.RFC3339) - } - - expiry := options.Expiry.UTC().Format(time.RFC3339) - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - protocols := "" - if options.UseHTTPS { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) - if err != nil { - return "", err - } - - sig := c.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {c.apiVersion}, - "se": {expiry}, - "sr": {signedResource}, - "sp": {permissions}, - "sig": {sig}, - } - - if start != "" { - sasParams.Add("st", start) - } - - if c.apiVersion >= "2015-04-05" { - if protocols != "" { - sasParams.Add("spr", protocols) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - } - - // Add override response headers - addQueryParameter(sasParams, "rscc", headers.CacheControl) - addQueryParameter(sasParams, "rscd", headers.ContentDisposition) - addQueryParameter(sasParams, "rsce", headers.ContentEncoding) - addQueryParameter(sasParams, "rscl", headers.ContentLanguage) - addQueryParameter(sasParams, "rsct", headers.ContentType) - - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { - rscc := headers.CacheControl - rscd := headers.ContentDisposition - rsce := headers.ContentEncoding - rscl := headers.ContentLanguage - rsct := headers.ContentType - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return
fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go deleted file mode 100644 index f6a9da282..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ /dev/null @@ -1,182 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties -func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { - return b.client.getServiceProperties(blobServiceName, b.auth) -} - -// SetServiceProperties sets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties -func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { - return b.client.setServiceProperties(props, blobServiceName, b.auth) -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// GetContainerReference returns a Container object for the specified container name. 
-func (b *BlobStorageClient) GetContainerReference(name string) *Container { - return &Container{ - bsc: b, - Name: name, - } -} - -// GetContainerReferenceFromSASURI returns a Container object for the specified -// container SASURI -func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { - path := strings.Split(sasuri.Path, "/") - if len(path) <= 1 { - return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) - } - cli := newSASClient().GetBlobService() - return &Container{ - bsc: &cli, - Name: path[1], - sasuri: sasuri, - }, nil -} - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - type ContainerAlias struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - Metadata BlobMetadata - sasuri url.URL - } - type ContainerListResponseAlias struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []ContainerAlias `xml:"Containers>Container"` - } - - var outAlias ContainerListResponseAlias - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - err = xmlUnmarshal(resp.Body, &outAlias) - if err != nil { - return nil, err - } - - out := ContainerListResponse{ - XMLName: outAlias.XMLName, - Xmlns: outAlias.Xmlns, - Prefix: outAlias.Prefix, - Marker: outAlias.Marker, - NextMarker: outAlias.NextMarker, - MaxResults: outAlias.MaxResults, - Containers: make([]Container, len(outAlias.Containers)), - } - for i, cnt := range outAlias.Containers { - out.Containers[i] = Container{ - bsc: &b, - Name: cnt.Name, - Properties: cnt.Properties, - Metadata: map[string]string(cnt.Metadata), - sasuri: cnt.sasuri, - } - } - - return &out, err -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func writeMetadata(h http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range h { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
- k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go deleted file mode 100644 index 5d9467afd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ /dev/null @@ -1,270 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 100 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { - return b.CreateBlockBlobFromReader(nil, options) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// Any headers set in blob.Properties or metadata in blob.Metadata -// will be set on the blob. 
-// -// The API rejects requests with size > 256 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. -// -// To create a blob from scratch, call container.GetBlobReference() to -// get an empty blob, fill in blob.Properties and blob.Metadata as -// appropriate then call this method. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - - headers["Content-Length"] = "0" - var n int64 - var err error - if blob != nil { - type lener interface { - Len() int - } - // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. - if l, ok := blob.(lener); ok { - n = int64(l.Len()) - } else { - var buf bytes.Buffer - n, err = io.Copy(&buf, blob) - if err != nil { - return err - } - blob = &buf - } - - headers["Content-Length"] = strconv.FormatInt(n, 10) - } - b.Properties.ContentLength = n - - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockOptions includes the options for a put block operation -type PutBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - ContentMD5 string `header:"Content-MD5"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 100 MiB (but this limit is not -// checked by the SDK). -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { - return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 100 MiB (but this limit is not -// checked by the SDK). 
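Putting PutBlock and PutBlockList together, a sketch of the larger-blob path the comments above point to; the 4 MiB chunk size is arbitrary, and the zero-padded block-ID scheme is one common convention rather than anything the API mandates:

package main

import (
    "encoding/base64"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

// uploadInBlocks stages data in fixed-size blocks and commits them in order.
func uploadInBlocks(blob *storage.Blob, data []byte) {
    const chunkSize = 4 * 1024 * 1024 // well under the 100 MiB per-block cap
    var blocks []storage.Block

    for i := 0; len(data) > 0; i++ {
        n := chunkSize
        if n > len(data) {
            n = len(data)
        }
        // Block IDs must be base64 strings of equal length within one blob.
        id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
        if err := blob.PutBlock(id, data[:n], nil); err != nil {
            log.Fatal(err)
        }
        blocks = append(blocks, storage.Block{ID: id, Status: storage.BlockStatusUncommitted})
        data = data[n:]
    }
    if err := blob.PutBlockList(blocks, nil); err != nil {
        log.Fatal(err)
    }
}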
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { - query := url.Values{ - "comp": {"block"}, - "blockid": {blockID}, - } - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", size) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockListOptions includes the options for a put block list operation -type PutBlockListOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List -func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { - params := url.Values{"comp": {"blocklist"}} - blockListXML := prepareBlockListRequest(blocks) - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// GetBlockListOptions includes the options for a get block list operation -type GetBlockListOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
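And a small sketch of reading the block list back with the GetBlockList call described above, distinguishing committed from still-staged blocks:

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

// showBlocks prints the committed and uncommitted blocks of a block blob.
func showBlocks(blob *storage.Blob) {
    list, err := blob.GetBlockList(storage.BlockListTypeAll, nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, blk := range list.CommittedBlocks {
        fmt.Printf("committed   %s (%d bytes)\n", blk.Name, blk.Size)
    }
    for _, blk := range list.UncommittedBlocks {
        fmt.Printf("uncommitted %s (%d bytes)\n", blk.Name, blk.Size)
    }
}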
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List -func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { - params := url.Values{ - "comp": {"blocklist"}, - "blocklisttype": {string(blockType)}, - } - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out BlockListResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go deleted file mode 100644 index 5adba4a1b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ /dev/null @@ -1,986 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/version" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests in the - // public cloud when a default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. 
- DefaultAPIVersion = "2016-05-31" - - defaultUseHTTPS = true - defaultRetryAttempts = 5 - defaultRetryDuration = time.Second * 5 - - // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountName = "devstoreaccount1" - - // StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator - StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" - - blobServiceName = "blob" - tableServiceName = "table" - queueServiceName = "queue" - fileServiceName = "file" - - storageEmulatorBlob = "127.0.0.1:10000" - storageEmulatorTable = "127.0.0.1:10002" - storageEmulatorQueue = "127.0.0.1:10001" - - userAgentHeader = "User-Agent" - - userDefinedMetadataHeaderPrefix = "x-ms-meta-" - - connectionStringAccountName = "accountname" - connectionStringAccountKey = "accountkey" - connectionStringEndpointSuffix = "endpointsuffix" - connectionStringEndpointProtocol = "defaultendpointsprotocol" - - connectionStringBlobEndpoint = "blobendpoint" - connectionStringFileEndpoint = "fileendpoint" - connectionStringQueueEndpoint = "queueendpoint" - connectionStringTableEndpoint = "tableendpoint" - connectionStringSAS = "sharedaccesssignature" -) - -var ( - validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") - defaultValidStatusCodes = []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -// Sender sends a request -type Sender interface { - Send(*Client, *http.Request) (*http.Response, error) -} - -// DefaultSender is the default sender for the client. It implements -// an automatic retry strategy. -type DefaultSender struct { - RetryAttempts int - RetryDuration time.Duration - ValidStatusCodes []int - attempts int // used for testing -} - -// Send is the default retry strategy in the client -func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(req) - for attempts := 0; attempts < ds.RetryAttempts; attempts++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = c.HTTPClient.Do(rr.Request()) - if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { - return resp, err - } - autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) - ds.attempts = attempts - } - ds.attempts++ - return resp, err -} - -// Client is the object that needs to be constructed to perform -// operations on the storage account. -type Client struct { - // HTTPClient is the http.Client used to initiate API - // requests. http.DefaultClient is used when creating a - // client. - HTTPClient *http.Client - - // Sender is an interface that sends the request. Clients are - // created with a DefaultSender. The DefaultSender has an - // automatic retry strategy built in. The Sender can be customized. - Sender Sender - - accountName string - accountKey []byte - useHTTPS bool - UseSharedKeyLite bool - baseURL string - apiVersion string - userAgent string - sasClient bool - accountSASToken url.Values -} - -type odataResponse struct { - resp *http.Response - odata odataErrorWrapper -} - -// AzureStorageServiceError contains fields of the error response from -// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx -// Some fields might be specific to certain calls.
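Because Sender and the DefaultSender fields above are exported, callers could tune the retry policy without forking the package. A minimal sketch; the attempt count, backoff, and the extra 429 status are illustrative choices, not the package defaults:

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
    if err != nil {
        log.Fatal(err)
    }
    // Retry harder than the default 5 attempts / 5s base backoff.
    client.Sender = &storage.DefaultSender{
        RetryAttempts: 10,
        RetryDuration: 2 * time.Second,
        ValidStatusCodes: []int{
            http.StatusTooManyRequests, // added on top of the default set
            http.StatusRequestTimeout,
            http.StatusInternalServerError,
            http.StatusBadGateway,
            http.StatusServiceUnavailable,
            http.StatusGatewayTimeout,
        },
    }
    _ = client // use as usual; every exec() now goes through this policy
}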
-type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - Lang string - StatusCode int - RequestID string - Date string - APIVersion string -} - -type odataErrorMessage struct { - Lang string `json:"lang"` - Value string `json:"value"` -} - -type odataError struct { - Code string `json:"code"` - Message odataErrorMessage `json:"message"` -} - -type odataErrorWrapper struct { - Err odataError `json:"odata.error"` -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int - inner error -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner) -} - -// Got is the actual status code returned by Azure. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// Inner returns any inner error info. -func (e UnexpectedStatusCodeError) Inner() error { - return e.inner -} - -// NewClientFromConnectionString creates a Client from the connection string. -func NewClientFromConnectionString(input string) (Client, error) { - // build a map of connection string key/value pairs - parts := map[string]string{} - for _, pair := range strings.Split(input, ";") { - if pair == "" { - continue - } - - equalDex := strings.IndexByte(pair, '=') - if equalDex <= 0 { - return Client{}, fmt.Errorf("Invalid connection segment %q", pair) - } - - value := strings.TrimSpace(pair[equalDex+1:]) - key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) - parts[key] = value - } - - // TODO: validate parameter sets? - - if parts[connectionStringAccountName] == StorageEmulatorAccountName { - return NewEmulatorClient() - } - - if parts[connectionStringSAS] != "" { - endpoint := "" - if parts[connectionStringBlobEndpoint] != "" { - endpoint = parts[connectionStringBlobEndpoint] - } else if parts[connectionStringFileEndpoint] != "" { - endpoint = parts[connectionStringFileEndpoint] - } else if parts[connectionStringQueueEndpoint] != "" { - endpoint = parts[connectionStringQueueEndpoint] - } else { - endpoint = parts[connectionStringTableEndpoint] - } - - return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS]) - } - - useHTTPS := defaultUseHTTPS - if parts[connectionStringEndpointProtocol] != "" { - useHTTPS = parts[connectionStringEndpointProtocol] == "https" - } - - return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey], - parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS) -} - -// NewBasicClient constructs a Client with given storage service name and -// key. 
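As a usage sketch of NewClientFromConnectionString above, with a placeholder account name and base64 key; a SharedAccessSignature segment in the string would instead route to the account-SAS constructor, and the emulator account name short-circuits to NewEmulatorClient:

package main

import (
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cs := "DefaultEndpointsProtocol=https;AccountName=myaccount;" +
        "AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net"
    client, err := storage.NewClientFromConnectionString(cs)
    if err != nil {
        log.Fatal(err)
    }
    bsc := client.GetBlobService()
    _ = bsc // use the blob service client as usual
}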
-func NewBasicClient(accountName, accountKey string) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) -} - -// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and -// key in the referenced cloud. -func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) -} - -// NewEmulatorClient constructs a Client intended to only work with Azure -// Storage Emulator -func NewEmulatorClient() (Client, error) { - return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) -} - -// NewClient constructs a Client. This should be used if the caller wants -// to specify whether to use HTTPS, a specific REST API version or a custom -// storage endpoint other than Azure Public Cloud. -func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { - var c Client - if !IsValidStorageAccount(accountName) { - return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) - } else if accountKey == "" { - return c, fmt.Errorf("azure: account key required") - } else if serviceBaseURL == "" { - return c, fmt.Errorf("azure: base storage service url required") - } - - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return c, fmt.Errorf("azure: malformed storage account key: %v", err) - } - - c = Client{ - HTTPClient: http.DefaultClient, - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: serviceBaseURL, - apiVersion: apiVersion, - sasClient: false, - UseSharedKeyLite: false, - Sender: &DefaultSender{ - RetryAttempts: defaultRetryAttempts, - ValidStatusCodes: defaultValidStatusCodes, - RetryDuration: defaultRetryDuration, - }, - } - c.userAgent = c.getDefaultUserAgent() - return c, nil -} - -// IsValidStorageAccount checks if the storage account name is valid. -// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account -func IsValidStorageAccount(account string) bool { - return validStorageAccount.MatchString(account) -} - -// NewAccountSASClient constructs a client that uses accountSAS authorization -// for its operations. -func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client { - c := newSASClient() - c.accountSASToken = token - c.accountName = account - c.baseURL = env.StorageEndpointSuffix - - // Get API version and protocol from token - c.apiVersion = token.Get("sv") - c.useHTTPS = token.Get("spr") == "https" - return c -} - -// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization -// for its operations using the specified endpoint and SAS token.
-func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) { - u, err := url.Parse(endpoint) - if err != nil { - return Client{}, err - } - - token, err := url.ParseQuery(sasToken) - if err != nil { - return Client{}, err - } - - // the host name will look something like this - // - foo.blob.core.windows.net - // "foo" is the account name - // "core.windows.net" is the baseURL - - // find the first dot to get account name - i1 := strings.IndexByte(u.Host, '.') - if i1 < 0 { - return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host) - } - - // now find the second dot to get the base URL - i2 := strings.IndexByte(u.Host[i1+1:], '.') - if i2 < 0 { - return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:]) - } - - c := newSASClient() - c.accountSASToken = token - c.accountName = u.Host[:i1] - c.baseURL = u.Host[i1+i2+2:] - - // Get API version and protocol from token - c.apiVersion = token.Get("sv") - c.useHTTPS = token.Get("spr") == "https" - return c, nil -} - -func newSASClient() Client { - c := Client{ - HTTPClient: http.DefaultClient, - apiVersion: DefaultAPIVersion, - sasClient: true, - Sender: &DefaultSender{ - RetryAttempts: defaultRetryAttempts, - ValidStatusCodes: defaultValidStatusCodes, - RetryDuration: defaultRetryDuration, - }, - } - c.userAgent = c.getDefaultUserAgent() - return c -} - -func (c Client) isServiceSASClient() bool { - return c.sasClient && c.accountSASToken == nil -} - -func (c Client) isAccountSASClient() bool { - return c.sasClient && c.accountSASToken != nil -} - -func (c Client) getDefaultUserAgent() string { - return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - version.Number, - c.apiVersion, - ) -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) -} - -// protectUserAgent is used in funcs that include extraheaders as a parameter. -// It prevents the User-Agent header to be overwritten, instead if it happens to -// be present, it gets added to the current User-Agent. 
Use it before getStandardHeaders. -func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { - if v, ok := extraheaders[userAgentHeader]; ok { - c.AddToUserAgent(v) - delete(extraheaders, userAgentHeader) - } - return extraheaders -} - -func (c Client) getBaseURL(service string) *url.URL { - scheme := "http" - if c.useHTTPS { - scheme = "https" - } - host := "" - if c.accountName == StorageEmulatorAccountName { - switch service { - case blobServiceName: - host = storageEmulatorBlob - case tableServiceName: - host = storageEmulatorTable - case queueServiceName: - host = storageEmulatorQueue - } - } else { - host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) - } - - return &url.URL{ - Scheme: scheme, - Host: host, - } -} - -func (c Client) getEndpoint(service, path string, params url.Values) string { - u := c.getBaseURL(service) - - // API doesn't accept path segments not starting with '/' - if !strings.HasPrefix(path, "/") { - path = fmt.Sprintf("/%v", path) - } - - if c.accountName == StorageEmulatorAccountName { - path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) - } - - u.Path = path - u.RawQuery = params.Encode() - return u.String() -} - -// AccountSASTokenOptions includes options for constructing -// an account SAS token. -// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas -type AccountSASTokenOptions struct { - APIVersion string - Services Services - ResourceTypes ResourceTypes - Permissions Permissions - Start time.Time - Expiry time.Time - IP string - UseHTTPS bool -} - -// Services specify services accessible with an account SAS. -type Services struct { - Blob bool - Queue bool - Table bool - File bool -} - -// ResourceTypes specify the resources accessible with an -// account SAS. -type ResourceTypes struct { - Service bool - Container bool - Object bool -} - -// Permissions specifies permissions for an account SAS. -type Permissions struct { - Read bool - Write bool - Delete bool - List bool - Add bool - Create bool - Update bool - Process bool -} - -// GetAccountSASToken creates an account SAS token -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas -func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { - if options.APIVersion == "" { - options.APIVersion = c.apiVersion - } - - if options.APIVersion < "2015-04-05" { - return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. 
API version : %s", options.APIVersion) - } - - // build services string - services := "" - if options.Services.Blob { - services += "b" - } - if options.Services.Queue { - services += "q" - } - if options.Services.Table { - services += "t" - } - if options.Services.File { - services += "f" - } - - // build resources string - resources := "" - if options.ResourceTypes.Service { - resources += "s" - } - if options.ResourceTypes.Container { - resources += "c" - } - if options.ResourceTypes.Object { - resources += "o" - } - - // build permissions string - permissions := "" - if options.Permissions.Read { - permissions += "r" - } - if options.Permissions.Write { - permissions += "w" - } - if options.Permissions.Delete { - permissions += "d" - } - if options.Permissions.List { - permissions += "l" - } - if options.Permissions.Add { - permissions += "a" - } - if options.Permissions.Create { - permissions += "c" - } - if options.Permissions.Update { - permissions += "u" - } - if options.Permissions.Process { - permissions += "p" - } - - // build start time, if exists - start := "" - if options.Start != (time.Time{}) { - start = options.Start.Format(time.RFC3339) - // For some reason I don't understand, it fails when the rest of the string is included - start = start[:10] - } - - // build expiry time - expiry := options.Expiry.Format(time.RFC3339) - // For some reason I don't understand, it fails when the rest of the string is included - expiry = expiry[:10] - - protocol := "https,http" - if options.UseHTTPS { - protocol = "https" - } - - stringToSign := strings.Join([]string{ - c.accountName, - permissions, - services, - resources, - start, - expiry, - options.IP, - protocol, - options.APIVersion, - "", - }, "\n") - signature := c.computeHmac256(stringToSign) - - sasParams := url.Values{ - "sv": {options.APIVersion}, - "ss": {services}, - "srt": {resources}, - "sp": {permissions}, - "se": {expiry}, - "spr": {protocol}, - "sig": {signature}, - } - if start != "" { - sasParams.Add("st", start) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - - return sasParams, nil -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - b := BlobStorageClient{ - client: c, - } - b.client.AddToUserAgent(blobServiceName) - b.auth = sharedKey - if c.UseSharedKeyLite { - b.auth = sharedKeyLite - } - return b -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - q := QueueServiceClient{ - client: c, - } - q.client.AddToUserAgent(queueServiceName) - q.auth = sharedKey - if c.UseSharedKeyLite { - q.auth = sharedKeyLite - } - return q -} - -// GetTableService returns a TableServiceClient which can operate on the table -// service of the storage account. -func (c Client) GetTableService() TableServiceClient { - t := TableServiceClient{ - client: c, - } - t.client.AddToUserAgent(tableServiceName) - t.auth = sharedKeyForTable - if c.UseSharedKeyLite { - t.auth = sharedKeyLiteForTable - } - return t -} - -// GetFileService returns a FileServiceClient which can operate on the file -// service of the storage account. 
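End to end, the account-SAS construction above was used roughly like this; credentials are placeholders, and the returned url.Values carry the sv/ss/srt/sp/se/spr/sig parameters assembled by GetAccountSASToken:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
    if err != nil {
        log.Fatal(err)
    }
    token, err := client.GetAccountSASToken(storage.AccountSASTokenOptions{
        Services:      storage.Services{Blob: true},
        ResourceTypes: storage.ResourceTypes{Service: true, Container: true, Object: true},
        Permissions:   storage.Permissions{Read: true, List: true},
        Expiry:        time.Now().Add(48 * time.Hour),
        UseHTTPS:      true,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(token.Encode()) // append to a resource URL as the query string
}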
-func (c Client) GetFileService() FileServiceClient { - f := FileServiceClient{ - client: c, - } - f.client.AddToUserAgent(fileServiceName) - f.auth = sharedKey - if c.UseSharedKeyLite { - f.auth = sharedKeyLite - } - return f -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - userAgentHeader: c.userAgent, - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, errors.New("azure/storage: error creating request: " + err.Error()) - } - - // http.NewRequest() will automatically set req.ContentLength for a handful of types - // otherwise we will handle here. - if req.ContentLength < 1 { - if clstr, ok := headers["Content-Length"]; ok { - if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { - req.ContentLength = cl - } - } - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 - } - - if c.isAccountSASClient() { - // append the SAS token to the query params - v := req.URL.Query() - v = mergeParams(v, c.accountSASToken) - req.URL.RawQuery = v.Encode() - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, err - } - - if resp.StatusCode >= 400 && resp.StatusCode <= 505 { - return resp, getErrorFromResponse(resp) - } - - return resp, nil -} - -func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, nil, nil, err - } - - req, err := http.NewRequest(verb, url, body) - for k, v := range headers { - req.Header.Add(k, v) - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, nil, nil, err - } - - respToRet := &odataResponse{resp: resp} - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, nil, nil, err - } - - requestID, date, version := getDebugHeaders(resp.Header) - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) - return respToRet, req, resp, err - } - // try unmarshal as odata.error json - err = json.Unmarshal(respBody, &respToRet.odata) - } - - return respToRet, req, resp, err -} - -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) - return respToRet, err -} - -func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - // execute common query, get back generated request, response etc... for more processing. 
- respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) - if err != nil { - return nil, err - } - - // return the OData in the case of executing batch commands. - // In this case we need to read the outer batch boundary and contents. - // Then we read the changeset information within the batch - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - // outer multipart body - _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) - if err != nil { - return nil, err - } - - // batch details. - batchBoundary := batchHeader["boundary"] - batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) - if err != nil { - return nil, err - } - - // changeset details. - err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) - if err != nil { - return nil, err - } - - return respToRet, nil -} - -func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { - changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) - changesetPart, err := changesetMultiReader.NextPart() - if err != nil { - return err - } - - changesetPartBufioReader := bufio.NewReader(changesetPart) - changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) - if err != nil { - return err - } - - if changesetResp.StatusCode != http.StatusNoContent { - changesetBody, err := readAndCloseBody(changesetResp.Body) - if err != nil { - return err - } - err = json.Unmarshal(changesetBody, &respToRet.odata) - if err != nil { - return err - } - respToRet.resp = changesetResp - } - - return nil -} - -func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { - respBodyString := string(respBody) - respBodyReader := strings.NewReader(respBodyString) - - // reading batchresponse - batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) - batchPart, err := batchMultiReader.NextPart() - if err != nil { - return nil, "", err - } - batchPartBufioReader := bufio.NewReader(batchPart) - - _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) - if err != nil { - return nil, "", err - } - changesetBoundary := changesetHeader["boundary"] - return batchPartBufioReader, changesetBoundary, nil -} - -func readAndCloseBody(body io.ReadCloser) ([]byte, error) { - defer body.Close() - out, err := ioutil.ReadAll(body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { - if err := xml.Unmarshal(body, storageErr); err != nil { - storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body)) - return err - } - return nil -} - -func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { - odataError := odataErrorWrapper{} - if err := json.Unmarshal(body, &odataError); err != nil { - storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. 
Body: %v.", err, string(body)) - return err - } - storageErr.Code = odataError.Err.Code - storageErr.Message = odataError.Err.Message.Value - storageErr.Lang = odataError.Err.Message.Lang - return nil -} - -func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { - return AzureStorageServiceError{ - StatusCode: code, - Code: status, - RequestID: requestID, - Date: date, - APIVersion: version, - Message: "no response body was available for error status code", - } -} - -func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) -} - -// checkRespCode returns UnexpectedStatusError if the given response code is not -// one of the allowed status codes; otherwise nil. -func checkRespCode(resp *http.Response, allowed []int) error { - for _, v := range allowed { - if resp.StatusCode == v { - return nil - } - } - err := getErrorFromResponse(resp) - return UnexpectedStatusCodeError{ - allowed: allowed, - got: resp.StatusCode, - inner: err, - } -} - -func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { - metadata = c.protectUserAgent(metadata) - for k, v := range metadata { - h[userDefinedMetadataHeaderPrefix+k] = v - } - return h -} - -func getDebugHeaders(h http.Header) (requestID, date, version string) { - requestID = h.Get("x-ms-request-id") - version = h.Get("x-ms-version") - date = h.Get("Date") - return -} - -func getErrorFromResponse(resp *http.Response) error { - respBody, err := readAndCloseBody(resp.Body) - if err != nil { - return err - } - - requestID, date, version := getDebugHeaders(resp.Header) - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) - } else { - storageErr := AzureStorageServiceError{ - StatusCode: resp.StatusCode, - RequestID: requestID, - Date: date, - APIVersion: version, - } - // response contains storage service error object, unmarshal - if resp.Header.Get("Content-Type") == "application/xml" { - errIn := serviceErrFromXML(respBody, &storageErr) - if err != nil { // error unmarshaling the error response - err = errIn - } - } else { - errIn := serviceErrFromJSON(respBody, &storageErr) - if err != nil { // error unmarshaling the error response - err = errIn - } - } - err = storageErr - } - return err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go deleted file mode 100644 index e898e9bfa..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go +++ /dev/null @@ -1,38 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/url" - "time" -) - -// SASOptions includes options used by SAS URIs for different -// services and resources. -type SASOptions struct { - APIVersion string - Start time.Time - Expiry time.Time - IP string - UseHTTPS bool - Identifier string -} - -func addQueryParameter(query url.Values, key, value string) url.Values { - if value != "" { - query.Add(key, value) - } - return query -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go deleted file mode 100644 index bba44db74..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ /dev/null @@ -1,640 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// Container represents an Azure container. -type Container struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - Metadata map[string]string - sasuri url.URL -} - -// Client returns the HTTP client used by the Container reference. -func (c *Container) Client() *Client { - return &c.bsc.client -} - -func (c *Container) buildPath() string { - return fmt.Sprintf("/%s", c.Name) -} - -// GetURL gets the canonical URL to the container. -// This method does not create a publicly accessible URL if the container -// is private and this method does not check if the blob exists. -func (c *Container) GetURL() string { - container := c.Name - if container == "" { - container = "$root" - } - return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) -} - -// ContainerSASOptions are options to construct a container SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type ContainerSASOptions struct { - ContainerSASPermissions - OverrideHeaders - SASOptions -} - -// ContainerSASPermissions includes the available permissions for -// a container SAS URI. -type ContainerSASPermissions struct { - BlobServiceSASPermissions - List bool -} - -// GetSASURI creates a URL to the container which contains the Shared -// Access Signature with the specified options.
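A sketch of the container-level variant defined just below, which differs from the blob case mainly by the extra List permission (appended as "l") and the "c" signed resource; lifetime and permissions are illustrative:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

// listableContainerSAS builds a container SAS URI that also permits listing.
func listableContainerSAS(cnt *storage.Container) {
    opts := storage.ContainerSASOptions{}
    opts.Read = true // promoted from the embedded permissions structs
    opts.List = true
    opts.Expiry = time.Now().Add(time.Hour)
    opts.UseHTTPS = true

    uri, err := cnt.GetSASURI(opts)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(uri)
}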
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { - uri := c.GetURL() - signedResource := "c" - canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) - if err != nil { - return "", err - } - - // build permissions string - permissions := options.BlobServiceSASPermissions.buildString() - if options.List { - permissions += "l" - } - - return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - PublicAccess ContainerAccessType `xml:"PublicAccess"` -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// IncludeBlobDataset has options to include in a list blobs operation -type IncludeBlobDataset struct { - Snapshots bool - Metadata bool - UncommittedBlobs bool - Copy bool -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include *IncludeBlobDataset - MaxResults uint - Timeout uint - RequestID string -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != nil { - include := []string{} - include = addString(include, p.Include.Snapshots, "snapshots") - include = addString(include, p.Include.Metadata, "metadata") - include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") - include = addString(include, p.Include.Copy, "copy") - fullInclude := strings.Join(include, ",") - out.Set("include", fullInclude) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func addString(datasets []string, include bool, text string) []string { - if include { - datasets = append(datasets, text) - } - return datasets -} - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessPolicy represents each access policy in the container ACL. -type ContainerAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions represents the container ACLs. -type ContainerPermissions struct { - AccessType ContainerAccessType - AccessPolicies []ContainerAccessPolicy -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// GetBlobReference returns a Blob object for the specified blob name. -func (c *Container) GetBlobReference(name string) *Blob { - return &Blob{ - Container: c, - Name: name, - } -} - -// CreateContainerOptions includes the options for a create container operation -type CreateContainerOptions struct { - Timeout uint - Access ContainerAccessType `header:"x-ms-blob-public-access"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Create creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container -func (c *Container) Create(options *CreateContainerOptions) error { - resp, err := c.create(options) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// CreateIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. 
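The CreateIfNotExists contract below (true only when this call created the container) made it the idempotent choice for startup paths; a short sketch, with the same assumed entry points and placeholder credentials as above.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	cnt := client.GetBlobService().GetContainerReference("logs")

	// true: created by this call; false: it already existed.
	created, err := cnt.CreateIfNotExists(&storage.CreateContainerOptions{
		Access: storage.ContainerAccessTypePrivate,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created=%v", created)
}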
-func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { - resp, err := c.create(options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { - return resp.StatusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) -} - -// Exists returns true if a container with given name exists -// on the storage account, otherwise returns false. -func (c *Container) Exists() (bool, error) { - q := url.Values{"restype": {"container"}} - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - - } else { - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - headers := c.bsc.client.getStandardHeaders() - - resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetContainerPermissionOptions includes options for a set container permissions operation -type SetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetPermissions sets up container permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL -func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { - body, length, err := generateContainerACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetContainerPermissionOptions includes options for a get container permissions operation -type GetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to 
Azure -// leaseID will only be passed to Azure if populated -func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildAccessPolicy(ap, &resp.Header), nil -} - -func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { - // containerAccess. Blob, Container, empty - containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - permissions := ContainerPermissions{ - AccessType: ContainerAccessType(containerAccess), - AccessPolicies: []ContainerAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - capd := ContainerAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") - capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - permissions.AccessPolicies = append(permissions.AccessPolicies, capd) - } - return &permissions -} - -// DeleteContainerOptions includes options for a delete container operation -type DeleteContainerOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) Delete(options *DeleteContainerOptions) error { - resp, err := c.delete(options) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. 
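A sketch of the stored-access-policy flow these ACL types supported: SetPermissions takes a ContainerPermissions holding zero or more ContainerAccessPolicy entries, whose rwd flags are serialized by generateContainerPermissions further down. The policy ID here is an arbitrary example.

package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// grantDayRead attaches a 24-hour read-only stored access policy to a
// container while keeping the container itself private.
func grantDayRead(cnt *storage.Container) error {
	perms := storage.ContainerPermissions{
		AccessType: storage.ContainerAccessTypePrivate,
		AccessPolicies: []storage.ContainerAccessPolicy{{
			ID:         "read-only-24h", // example policy name
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
			CanRead:    true,
		}},
	}
	return cnt.SetPermissions(perms, nil)
}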
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { - resp, err := c.delete(options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs -func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - } else { - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) - - var out BlobListResponse - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - for i := range out.Blobs { - out.Blobs[i].Container = c - } - return out, err -} - -// ContainerMetadataOptions includes options for container metadata operations -type ContainerMetadataOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetMetadata replaces the metadata for the specified container. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata -func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { - params := url.Values{ - "comp": {"metadata"}, - "restype": {"container"}, - } - headers := c.bsc.client.getStandardHeaders() - headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetMetadata returns all user-defined metadata for the specified container. -// -// All metadata keys will be returned in lower case. 
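ListBlobs (above) returns one page per call, so callers were expected to loop on NextMarker; a sketch, assuming Blob exposes a Name field as it does elsewhere in this package.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// listAll prints every blob under prefix, following NextMarker through
// successive BlobListResponse pages.
func listAll(cnt *storage.Container, prefix string) error {
	params := storage.ListBlobsParameters{Prefix: prefix, MaxResults: 1000}
	for {
		resp, err := cnt.ListBlobs(params)
		if err != nil {
			return err
		}
		for _, b := range resp.Blobs {
			fmt.Println(b.Name)
		}
		if resp.NextMarker == "" {
			return nil // last page
		}
		params.Marker = resp.NextMarker
	}
}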
(HTTP header -// names are case-insensitive.) -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata -func (c *Container) GetMetadata(options *ContainerMetadataOptions) error { - params := url.Values{ - "comp": {"metadata"}, - "restype": {"container"}, - } - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - c.writeMetadata(resp.Header) - return nil -} - -func (c *Container) writeMetadata(h http.Header) { - c.Metadata = writeMetadata(h) -} - -func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, capd := range policies { - permission := capd.generateContainerPermissions() - signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { - // generate the permissions string (rwd). - // still want the end user API to have bool flags. - permissions = "" - - if capd.CanRead { - permissions += "r" - } - - if capd.CanWrite { - permissions += "w" - } - - if capd.CanDelete { - permissions += "d" - } - - return permissions -} - -// GetProperties updated the properties of the container. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties -func (c *Container) GetProperties() error { - params := url.Values{ - "restype": {"container"}, - } - headers := c.bsc.client.getStandardHeaders() - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return err - } - defer resp.Body.Close() - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - // update properties - c.Properties.Etag = resp.Header.Get(headerEtag) - c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status") - c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state") - c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration") - c.Properties.LastModified = resp.Header.Get("Last-Modified") - c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader)) - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go deleted file mode 100644 index 248dfb220..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go +++ /dev/null @@ -1,237 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// CopyOptions includes the options for a copy blob operation -type CopyOptions struct { - Timeout uint - Source CopyOptionsConditions - Destiny CopyOptionsConditions - RequestID string -} - -// IncrementalCopyOptions includes the options for an incremental copy blob operation -type IncrementalCopyOptions struct { - Timeout uint - Destination IncrementalCopyOptionsConditions - RequestID string -} - -// CopyOptionsConditions includes some conditional options in a copy blob operation -type CopyOptionsConditions struct { - LeaseID string - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation -type IncrementalCopyOptionsConditions struct { - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// Copy starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob -func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { - copyID, err := b.StartCopy(sourceBlob, options) - if err != nil { - return err - } - - return b.WaitForCopy(copyID) -} - -// StartCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) 
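Copy is the blocking helper (StartCopy followed by WaitForCopy, both shown nearby); since the doc comment notes there is no SLA on blob copy, long-running copies were usually started with StartCopy and polled instead. A minimal sketch; Blob.GetURL is the canonical-URL accessor the comment above refers to.

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// copyBlob copies src into dst and blocks until the service reports the
// copy as finished.
func copyBlob(dst, src *storage.Blob) error {
	return dst.Copy(src.GetURL(), nil)
}

// startCopy kicks off an asynchronous copy and returns the copy ID for
// later WaitForCopy or AbortCopy calls.
func startCopy(dst, src *storage.Blob) (string, error) {
	return dst.StartCopy(src.GetURL(), nil)
}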
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob -func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - // source - headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) - headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) - headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) - //destiny - headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) - headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) - headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.Body) - - if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.Header.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// AbortCopyOptions includes the options for an abort blob operation -type AbortCopyOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. 
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
-func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
-	params := url.Values{
-		"comp":   {"copy"},
-		"copyid": {copyID},
-	}
-	headers := b.Container.bsc.client.getStandardHeaders()
-	headers["x-ms-copy-action"] = "abort"
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = mergeHeaders(headers, headersFromStruct(*options))
-	}
-	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-	if err != nil {
-		return err
-	}
-	defer readAndCloseBody(resp.Body)
-	return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// WaitForCopy loops until a BlobCopy operation is completed (or fails with an error)
-func (b *Blob) WaitForCopy(copyID string) error {
-	for {
-		err := b.GetProperties(nil)
-		if err != nil {
-			return err
-		}
-
-		if b.Properties.CopyID != copyID {
-			return errBlobCopyIDMismatch
-		}
-
-		switch b.Properties.CopyStatus {
-		case blobCopyStatusSuccess:
-			return nil
-		case blobCopyStatusPending:
-			continue
-		case blobCopyStatusAborted:
-			return errBlobCopyAborted
-		case blobCopyStatusFailed:
-			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
-		default:
-			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
-		}
-	}
-}
-
-// IncrementalCopyBlob copies a snapshot of a source blob to the referring blob.
-// sourceBlob parameter must be a valid snapshot URL of the original blob.
-// The original blob must be public, or use a Shared Access Signature.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
-func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { - params := url.Values{"comp": {"incrementalcopy"}} - - // need formatting to 7 decimal places so it's friendly to Windows and *nix - snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") - u, err := url.Parse(sourceBlobURL) - if err != nil { - return "", err - } - query := u.Query() - query.Add("snapshot", snapshotTimeFormatted) - encodedQuery := query.Encode() - encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) - u.RawQuery = encodedQuery - snapshotURL := u.String() - - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-copy-source"] = snapshotURL - - if options != nil { - addTimeout(params, options.Timeout) - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) - headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) - } - - // get URI of destination blob - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.Body) - - if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil { - return "", err - } - - copyID := resp.Header.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go deleted file mode 100644 index 237d10fd1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ /dev/null @@ -1,238 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "net/http" - "net/url" - "sync" -) - -// Directory represents a directory on a share. -type Directory struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties DirectoryProperties - share *Share -} - -// DirectoryProperties contains various properties of a directory. -type DirectoryProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` -} - -// ListDirsAndFilesParameters defines the set of customizable parameters to -// make a List Files and Directories call. 
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type ListDirsAndFilesParameters struct {
-	Prefix     string
-	Marker     string
-	MaxResults uint
-	Timeout    uint
-}
-
-// DirsAndFilesListResponse contains the response fields from
-// a List Files and Directories call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type DirsAndFilesListResponse struct {
-	XMLName     xml.Name    `xml:"EnumerationResults"`
-	Xmlns       string      `xml:"xmlns,attr"`
-	Marker      string      `xml:"Marker"`
-	MaxResults  int64       `xml:"MaxResults"`
-	Directories []Directory `xml:"Entries>Directory"`
-	Files       []File      `xml:"Entries>File"`
-	NextMarker  string      `xml:"NextMarker"`
-}
-
-// builds the complete directory path for this directory object.
-func (d *Directory) buildPath() string {
-	path := ""
-	current := d
-	for current.Name != "" {
-		path = "/" + current.Name + path
-		current = current.parent
-	}
-	return d.share.buildPath() + path
-}
-
-// Create this directory in the associated share.
-// If a directory with the same name already exists, the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) Create(options *FileRequestOptions) error {
-	// if this is the root directory exit early
-	if d.parent == nil {
-		return nil
-	}
-
-	params := prepareOptions(options)
-	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
-	if err != nil {
-		return err
-	}
-
-	d.updateEtagAndLastModified(headers)
-	return nil
-}
-
-// CreateIfNotExists creates this directory under the associated share if the
-// directory does not exist. Returns true if the directory is newly created or
-// false if the directory already exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
-	// if this is the root directory exit early
-	if d.parent == nil {
-		return false, nil
-	}
-
-	params := prepareOptions(options)
-	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
-	if resp != nil {
-		defer readAndCloseBody(resp.Body)
-		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
-			if resp.StatusCode == http.StatusCreated {
-				d.updateEtagAndLastModified(resp.Header)
-				return true, nil
-			}
-
-			return false, d.FetchAttributes(nil)
-		}
-	}
-
-	return false, err
-}
-
-// Delete removes this directory. It must be empty in order to be deleted.
-// If the directory does not exist, the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
-func (d *Directory) Delete(options *FileRequestOptions) error {
-	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
-}
-
-// DeleteIfExists removes this directory if it exists.
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory -func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this directory exists. -func (d *Directory) Exists() (bool, error) { - exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) - if exists { - d.updateEtagAndLastModified(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata for this directory. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties -func (d *Directory) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - d.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetDirectoryReference returns a child Directory object for this directory. -func (d *Directory) GetDirectoryReference(name string) *Directory { - return &Directory{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - } -} - -// GetFileReference returns a child File object for this directory. -func (d *Directory) GetFileReference(name string) *File { - return &File{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - mutex: &sync.Mutex{}, - } -} - -// ListDirsAndFiles returns a list of files and directories under this directory. -// It also contains a pagination token and other response details. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files -func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - resp, err := d.fsc.listContent(d.buildPath(), q, nil) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - var out DirsAndFilesListResponse - err = xmlUnmarshal(resp.Body, &out) - return &out, err -} - -// SetMetadata replaces the metadata for this directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata -func (d *Directory) SetMetadata(options *FileRequestOptions) error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (d *Directory) updateEtagAndLastModified(headers http.Header) { - d.Properties.Etag = headers.Get("Etag") - d.Properties.LastModified = headers.Get("Last-Modified") -} - -// URL gets the canonical URL to this directory. 
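A sketch of one level of share traversal with the deleted Directory API; the root directory is assumed to come from the share (GetRootDirectoryReference in the full package, not shown in this hunk).

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// printLevel lists one directory level of an Azure file share, split into
// the Directories and Files slices of DirsAndFilesListResponse.
func printLevel(dir *storage.Directory) error {
	resp, err := dir.ListDirsAndFiles(storage.ListDirsAndFilesParameters{})
	if err != nil {
		return err
	}
	for _, d := range resp.Directories {
		fmt.Println("dir: ", d.Name)
	}
	for _, f := range resp.Files {
		fmt.Println("file:", f.Name)
	}
	return nil
}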
-// This method does not create a publicly accessible URL if the directory -// is private and this method does not check if the directory exists. -func (d *Directory) URL() string { - return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go deleted file mode 100644 index 5af9b1ef3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ /dev/null @@ -1,456 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/satori/go.uuid" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - etagErrorTemplate = "Etag didn't match: %v" -) - -var ( - errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") - errNilPreviousResult = errors.New("The previous results page is nil") - errNilNextLink = errors.New("There are no more pages in this query results") -) - -// Entity represents an entity inside an Azure table. -type Entity struct { - Table *Table - PartitionKey string - RowKey string - TimeStamp time.Time - OdataMetadata string - OdataType string - OdataID string - OdataEtag string - OdataEditLink string - Properties map[string]interface{} -} - -// GetEntityReference returns an Entity object with the specified -// partition key and row key. -func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { - return &Entity{ - PartitionKey: partitionKey, - RowKey: rowKey, - Table: t, - } -} - -// EntityOptions includes options for entity operations. -type EntityOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetEntityOptions includes options for a get entity operation -type GetEntityOptions struct { - Select []string - RequestID string `header:"x-ms-client-request-id"` -} - -// Get gets the referenced entity. Which properties to get can be -// specified using the select option. 
-// See: -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities -func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { - if ml == EmptyPayload { - return errEmptyPayload - } - // RowKey and PartitionKey could be lost if not included in the query - // As those are the entity identifiers, it is best if they are not lost - rk := e.RowKey - pk := e.PartitionKey - - query := url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - } - headers := e.Table.tsc.client.getStandardHeaders() - headers[headerAccept] = string(ml) - - if options != nil { - if len(options.Select) > 0 { - query.Add("$select", strings.Join(options.Select, ",")) - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - err = json.Unmarshal(respBody, e) - if err != nil { - return err - } - e.PartitionKey = pk - e.RowKey = rk - - return nil -} - -// Insert inserts the referenced entity in its table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -// ml determines the level of detail of metadata in the operation response, -// or no data at all. -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity -func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, ml) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - if ml != EmptyPayload { - if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return err - } - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if err = e.UnmarshalJSON(data); err != nil { - return err - } - } else { - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - } - - return nil -} - -// Update updates the contents of an entity. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or if the ETag is different -// than the one in Azure. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 -func (e *Entity) Update(force bool, options *EntityOptions) error { - return e.updateMerge(force, http.MethodPut, options) -} - -// Merge merges the contents of entity specified with PartitionKey and RowKey -// with the content specified in Properties. -// The function fails if there is no entity with the same PartitionKey and -// RowKey in the table or if the ETag is different than the one in Azure. 
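A round-trip sketch for the deleted Entity API: Insert echoes the stored entity back when a non-empty MetadataLevel is requested, and Get (which rejects EmptyPayload, per errEmptyPayload above) takes a timeout in seconds. FullMetadata is assumed to be one of the package's MetadataLevel constants alongside EmptyPayload.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// upsertAndRead inserts an entity and reads it back.
func upsertAndRead(t *storage.Table) error {
	e := t.GetEntityReference("partition1", "row1")
	e.Properties = map[string]interface{}{
		"Count": int64(42), // stored with the package's OdataInt64 type annotation
		"Note":  "example",
	}
	if err := e.Insert(storage.FullMetadata, nil); err != nil {
		return err
	}

	out := t.GetEntityReference("partition1", "row1")
	if err := out.Get(30, storage.FullMetadata, nil); err != nil { // 30s timeout
		return err
	}
	log.Printf("%v", out.Properties)
	return nil
}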
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity -func (e *Entity) Merge(force bool, options *EntityOptions) error { - return e.updateMerge(force, "MERGE", options) -} - -// Delete deletes the entity. -// The function fails if there is no entity with the same PartitionKey and -// RowKey in the table or if the ETag is different than the one in Azure. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 -func (e *Entity) Delete(force bool, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - headers = addIfMatchHeader(headers, force, e.OdataEtag) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) - if err != nil { - if resp.StatusCode == http.StatusPreconditionFailed { - return fmt.Errorf(etagErrorTemplate, err) - } - return err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateTimestamp(resp.Header) -} - -// InsertOrReplace inserts an entity or replaces the existing one. -// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity -func (e *Entity) InsertOrReplace(options *EntityOptions) error { - return e.insertOr(http.MethodPut, options) -} - -// InsertOrMerge inserts an entity or merges the existing one. -// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity -func (e *Entity) InsertOrMerge(options *EntityOptions) error { - return e.insertOr("MERGE", options) -} - -func (e *Entity) buildPath() string { - return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) -} - -// MarshalJSON is a custom marshaller for entity -func (e *Entity) MarshalJSON() ([]byte, error) { - completeMap := map[string]interface{}{} - completeMap[partitionKeyNode] = e.PartitionKey - completeMap[rowKeyNode] = e.RowKey - for k, v := range e.Properties { - typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") - switch t := v.(type) { - case []byte: - completeMap[typeKey] = OdataBinary - completeMap[k] = t - case time.Time: - completeMap[typeKey] = OdataDateTime - completeMap[k] = t.Format(time.RFC3339Nano) - case uuid.UUID: - completeMap[typeKey] = OdataGUID - completeMap[k] = t.String() - case int64: - completeMap[typeKey] = OdataInt64 - completeMap[k] = fmt.Sprintf("%v", v) - default: - completeMap[k] = v - } - if strings.HasSuffix(k, OdataTypeSuffix) { - if !(completeMap[k] == OdataBinary || - completeMap[k] == OdataDateTime || - completeMap[k] == OdataGUID || - completeMap[k] == OdataInt64) { - return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) - } - valueKey := strings.TrimSuffix(k, OdataTypeSuffix) - if _, ok := completeMap[valueKey]; !ok { - return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) - } - } - } - return json.Marshal(completeMap) -} - -// UnmarshalJSON is a custom unmarshaller for entities -func (e *Entity) UnmarshalJSON(data []byte) error { - errorTemplate := "Deserializing error: %v" - - props := map[string]interface{}{} - err := json.Unmarshal(data, &props) - if err != nil { - return err - } - - // deselialize metadata - 
e.OdataMetadata = stringFromMap(props, "odata.metadata") - e.OdataType = stringFromMap(props, "odata.type") - e.OdataID = stringFromMap(props, "odata.id") - e.OdataEtag = stringFromMap(props, "odata.etag") - e.OdataEditLink = stringFromMap(props, "odata.editLink") - e.PartitionKey = stringFromMap(props, partitionKeyNode) - e.RowKey = stringFromMap(props, rowKeyNode) - - // deserialize timestamp - timeStamp, ok := props["Timestamp"] - if ok { - str, ok := timeStamp.(string) - if !ok { - return fmt.Errorf(errorTemplate, "Timestamp casting error") - } - t, err := time.Parse(time.RFC3339Nano, str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - e.TimeStamp = t - } - delete(props, "Timestamp") - delete(props, "Timestamp@odata.type") - - // deserialize entity (user defined fields) - for k, v := range props { - if strings.HasSuffix(k, OdataTypeSuffix) { - valueKey := strings.TrimSuffix(k, OdataTypeSuffix) - str, ok := props[valueKey].(string) - if !ok { - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) - } - switch v { - case OdataBinary: - props[valueKey], err = base64.StdEncoding.DecodeString(str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - case OdataDateTime: - t, err := time.Parse("2006-01-02T15:04:05Z", str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = t - case OdataGUID: - props[valueKey] = uuid.FromStringOrNil(str) - case OdataInt64: - i, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = i - default: - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) - } - delete(props, k) - } - } - - e.Properties = props - return nil -} - -func getAndDelete(props map[string]interface{}, key string) interface{} { - if value, ok := props[key]; ok { - delete(props, key) - return value - } - return nil -} - -func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { - if force { - h[headerIfMatch] = "*" - } else { - h[headerIfMatch] = etag - } - return h -} - -// updates Etag and timestamp -func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { - e.OdataEtag = headers.Get(headerEtag) - return e.updateTimestamp(headers) -} - -func (e *Entity) updateTimestamp(headers http.Header) error { - str := headers.Get(headerDate) - t, err := time.Parse(time.RFC1123, str) - if err != nil { - return fmt.Errorf("Update timestamp error: %v", err) - } - e.TimeStamp = t - return nil -} - -func (e *Entity) insertOr(verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateEtagAndTimestamp(resp.Header) -} - -func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - 
return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addIfMatchHeader(headers, force, e.OdataEtag) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - if resp.StatusCode == http.StatusPreconditionFailed { - return fmt.Errorf(etagErrorTemplate, err) - } - return err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateEtagAndTimestamp(resp.Header) -} - -func stringFromMap(props map[string]interface{}, key string) string { - value := getAndDelete(props, key) - if value != nil { - return value.(string) - } - return "" -} - -func (options *EntityOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - query = addTimeout(query, options.Timeout) - headers = headersFromStruct(*options) - } - return query, headers -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go deleted file mode 100644 index 8afc2e233..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ /dev/null @@ -1,480 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "sync" -) - -const fourMB = uint64(4194304) -const oneTB = uint64(1099511627776) - -// Export maximum range and file sizes -const MaxRangeSize = fourMB -const MaxFileSize = oneTB - -// File represents a file on a share. -type File struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties FileProperties `xml:"Properties"` - share *Share - FileCopyProperties FileCopyState - mutex *sync.Mutex -} - -// FileProperties contains various properties of a file. -type FileProperties struct { - CacheControl string `header:"x-ms-cache-control"` - Disposition string `header:"x-ms-content-disposition"` - Encoding string `header:"x-ms-content-encoding"` - Etag string - Language string `header:"x-ms-content-language"` - LastModified string - Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` - MD5 string `header:"x-ms-content-md5"` - Type string `header:"x-ms-content-type"` -} - -// FileCopyState contains various properties of a file copy operation. -type FileCopyState struct { - CompletionTime string - ID string `header:"x-ms-copy-id"` - Progress string - Source string - Status string `header:"x-ms-copy-status"` - StatusDesc string -} - -// FileStream contains file data returned from a call to GetFile. -type FileStream struct { - Body io.ReadCloser - ContentMD5 string -} - -// FileRequestOptions will be passed to misc file operations. 
-// Currently just Timeout (in seconds) but could expand.
-type FileRequestOptions struct {
-	Timeout uint // timeout duration in seconds.
-}
-
-func prepareOptions(options *FileRequestOptions) url.Values {
-	params := url.Values{}
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-	}
-	return params
-}
-
-// FileRanges contains a list of file range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRanges struct {
-	ContentLength uint64
-	LastModified  string
-	ETag          string
-	FileRanges    []FileRange `xml:"Range"`
-}
-
-// FileRange contains range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRange struct {
-	Start uint64 `xml:"Start"`
-	End   uint64 `xml:"End"`
-}
-
-func (fr FileRange) String() string {
-	return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
-}
-
-// builds the complete file path for this file object
-func (f *File) buildPath() string {
-	return f.parent.buildPath() + "/" + f.Name
-}
-
-// ClearRange releases the specified range of space in a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
-func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
-	var timeout *uint
-	if options != nil {
-		timeout = &options.Timeout
-	}
-	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
-	if err != nil {
-		return err
-	}
-
-	f.updateEtagAndLastModified(headers)
-	return nil
-}
-
-// Create creates a new file or replaces an existing one.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
-func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
-	if maxSize > oneTB {
-		return fmt.Errorf("max file size is 1TB")
-	}
-	params := prepareOptions(options)
-	headers := headersFromStruct(f.Properties)
-	headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
-	headers["x-ms-type"] = "file"
-
-	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
-	if err != nil {
-		return err
-	}
-
-	f.Properties.Length = maxSize
-	f.updateEtagAndLastModified(outputHeaders)
-	return nil
-}
-
-// CopyFile copies a file or blob from sourceURL to the path provided.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
-func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
-	extraHeaders := map[string]string{
-		"x-ms-type":        "file",
-		"x-ms-copy-source": sourceURL,
-	}
-	params := prepareOptions(options)
-
-	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
-	if err != nil {
-		return err
-	}
-
-	f.updateEtagAndLastModified(headers)
-	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
-	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
-	return nil
-}
-
-// Delete immediately removes this file from the storage account.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
-func (f *File) Delete(options *FileRequestOptions) error {
-	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
-}
-
-// DeleteIfExists removes this file if it exists.
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 -func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// GetFileOptions includes options for a get file operation -type GetFileOptions struct { - Timeout uint - GetContentMD5 bool -} - -// DownloadToStream operation downloads the file. -// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { - params := prepareOptions(options) - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.Body) - return nil, err - } - return resp.Body, nil -} - -// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { - extraHeaders := map[string]string{ - "Range": fileRange.String(), - } - params := url.Values{} - if options != nil { - if options.GetContentMD5 { - if isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - extraHeaders["x-ms-range-get-content-md5"] = "true" - } - params = addTimeout(params, options.Timeout) - } - - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) - if err != nil { - return fs, err - } - - if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - readAndCloseBody(resp.Body) - return fs, err - } - - fs.Body = resp.Body - if options != nil && options.GetContentMD5 { - fs.ContentMD5 = resp.Header.Get("Content-MD5") - } - return fs, nil -} - -// Exists returns true if this file exists. -func (f *File) Exists() (bool, error) { - exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) - if exists { - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - } - return exists, err -} - -// FetchAttributes updates metadata and properties for this file. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties -func (f *File) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - f.Metadata = getMetadataFromHeaders(headers) - return nil -} - -// returns true if the range is larger than 4MB -func isRangeTooBig(fileRange FileRange) bool { - if fileRange.End-fileRange.Start > fourMB { - return true - } - - return false -} - -// ListRangesOptions includes options for a list file ranges operation -type ListRangesOptions struct { - Timeout uint - ListRange *FileRange -} - -// ListRanges returns the list of valid ranges for this file. 
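A sketch of a ranged download using the types above; GetContentMD5 is only honored for ranges of at most 4MB, which a 1 MiB range satisfies.

package main

import (
	"io"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// downloadFirstMiB streams the first 1 MiB of a file and returns the
// transactional Content-MD5 the service computed for that range.
func downloadFirstMiB(f *storage.File, w io.Writer) (string, error) {
	fs, err := f.DownloadRangeToStream(
		storage.FileRange{Start: 0, End: 1<<20 - 1},
		&storage.GetFileOptions{GetContentMD5: true},
	)
	if err != nil {
		return "", err
	}
	defer fs.Body.Close()
	_, err = io.Copy(w, fs.Body)
	return fs.ContentMD5, err
}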
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges -func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { - params := url.Values{"comp": {"rangelist"}} - - // add optional range to list - var headers map[string]string - if options != nil { - params = addTimeout(params, options.Timeout) - if options.ListRange != nil { - headers = make(map[string]string) - headers["Range"] = options.ListRange.String() - } - } - - resp, err := f.fsc.listContent(f.buildPath(), params, headers) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - var cl uint64 - cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64) - if err != nil { - ioutil.ReadAll(resp.Body) - return nil, err - } - - var out FileRanges - out.ContentLength = cl - out.ETag = resp.Header.Get("ETag") - out.LastModified = resp.Header.Get("Last-Modified") - - err = xmlUnmarshal(resp.Body, &out) - return &out, err -} - -// modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { - if err := f.fsc.checkForStorageEmulator(); err != nil { - return nil, err - } - if fileRange.End < fileRange.Start { - return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if bytes != nil && isRangeTooBig(fileRange) { - return nil, errors.New("range cannot exceed 4MB in size") - } - - params := url.Values{"comp": {"range"}} - if timeout != nil { - params = addTimeout(params, *timeout) - } - - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) - - // default to clear - write := "clear" - cl := uint64(0) - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (fileRange.End - fileRange.Start) + 1 - } - - extraHeaders := map[string]string{ - "Content-Length": strconv.FormatUint(cl, 10), - "Range": fileRange.String(), - "x-ms-write": write, - } - - if contentMD5 != nil { - extraHeaders["Content-MD5"] = *contentMD5 - } - - headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) - resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.Body) - return resp.Header, checkRespCode(resp, []int{http.StatusCreated}) -} - -// SetMetadata replaces the metadata for this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetFileMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata -func (f *File) SetMetadata(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties on this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
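Because modifyRange rejects ranges over 4MB, uploads had to be chunked; a sketch using Create and the WriteRange method that appears just below.

package main

import (
	"bytes"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// uploadChunked sizes the file with Create, then writes it in ranges of
// at most 4MB, the largest size modifyRange accepts.
func uploadChunked(f *storage.File, data []byte) error {
	const chunk = 4 * 1024 * 1024
	if err := f.Create(uint64(len(data)), nil); err != nil {
		return err
	}
	for off := 0; off < len(data); off += chunk {
		end := off + chunk
		if end > len(data) {
			end = len(data)
		}
		r := storage.FileRange{Start: uint64(off), End: uint64(end - 1)}
		if err := f.WriteRange(bytes.NewReader(data[off:end]), r, nil); err != nil {
			return err
		}
	}
	return nil
}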
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties -func (f *File) SetProperties(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (f *File) updateEtagAndLastModified(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates file properties from the specified HTTP header -func (f *File) updateProperties(header http.Header) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err == nil { - f.Properties.Length = size - } - - f.updateEtagAndLastModified(header) - f.Properties.CacheControl = header.Get("Cache-Control") - f.Properties.Disposition = header.Get("Content-Disposition") - f.Properties.Encoding = header.Get("Content-Encoding") - f.Properties.Language = header.Get("Content-Language") - f.Properties.MD5 = header.Get("Content-MD5") - f.Properties.Type = header.Get("Content-Type") -} - -// URL gets the canonical URL to this file. -// This method does not create a publicly accessible URL if the file -// is private and this method does not check if the file exists. -func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) -} - -// WriteRangeOptions includes options for a write file range operation -type WriteRangeOptions struct { - Timeout uint - ContentMD5 string -} - -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside -// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with -// a maximum size of 4MB. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - var timeout *uint - var md5 *string - if options != nil { - timeout = &options.Timeout - md5 = &options.ContentMD5 - } - - headers, err := f.modifyRange(bytes, fileRange, timeout, md5) - if err != nil { - return err - } - // it's perfectly legal for multiple go routines to call WriteRange - // on the same *File (e.g. concurrently writing non-overlapping ranges) - // so we must take the file mutex before updating our properties. - f.mutex.Lock() - f.updateEtagAndLastModified(headers) - f.mutex.Unlock() - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go deleted file mode 100644 index 6467937d2..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ /dev/null @@ -1,338 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
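And the matching upload side for the file WriteRange API deleted above, a hedged sketch (writeAt is illustrative; the length rules come from modifyRange):

    import (
        "bytes"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // writeAt uploads data starting at offset; len(data) must be non-zero,
    // equal (End-Start)+1, and stay at or under the 4MB cap.
    func writeAt(f *storage.File, offset uint64, data []byte) error {
        r := storage.FileRange{Start: offset, End: offset + uint64(len(data)) - 1}
        return f.WriteRange(bytes.NewReader(data), r, nil)
    }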
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" -) - -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client - auth authentication -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// ShareListResponse contains the response fields from -// ListShares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - out = addTimeout(out, p.Timeout) - - return out -} - -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// GetShareReference returns a Share object for the specified share name. -func (f *FileServiceClient) GetShareReference(name string) *Share { - return &Share{ - fsc: f, - Name: name, - Properties: ShareProperties{ - Quota: -1, - }, - } -} - -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. 
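A sketch of paginating the ListShares API above via the NextMarker token (listAllShares is an illustrative name):

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // listAllShares walks every page of results, 100 shares at a time.
    func listAllShares(fsc *storage.FileServiceClient) error {
        params := storage.ListSharesParameters{MaxResults: 100}
        for {
            resp, err := fsc.ListShares(params)
            if err != nil {
                return err
            }
            for _, s := range resp.Shares {
                fmt.Println(s.Name)
            }
            if resp.NextMarker == "" {
                return nil
            }
            params.Marker = resp.NextMarker
        }
    }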
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares -func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - err = xmlUnmarshal(resp.Body, &out) - - // assign our client to the newly created Share objects - for i := range out.Shares { - out.Shares[i].fsc = &f - } - return &out, err -} - -// GetServiceProperties gets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties -func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return f.client.getServiceProperties(fileServiceName, f.auth) -} - -// SetServiceProperties sets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties -func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { - return f.client.setServiceProperties(props, fileServiceName, f.auth) -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.Body) - return nil, err - } - - return resp, nil -} - -// returns true if the specified resource exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, resp.Header, nil - } - } - return false, nil, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { - resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.Body) - return resp.Header, checkRespCode(resp, expectedResponseCodes) -} - -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := 
getURLInitValues(compNone, res) - combinedParams := mergeParams(values, urlParams) - uri := f.client.getEndpoint(fileServiceName, path, combinedParams) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) -} - -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - return resp.Header, nil -} - -// gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params = mergeParams(params, getURLInitValues(comp, res)) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil, f.auth) -} - -// deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { - resp, err := f.deleteResourceNoClose(path, res, options) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) -} - -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders -} - -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.Body) - - return resp.Header, checkRespCode(resp, []int{http.StatusOK}) -} - -//checkForStorageEmulator determines if 
the client is set up for use with
-//Azure Storage Emulator, and returns a relevant error
-func (f FileServiceClient) checkForStorageEmulator() error {
-    if f.client.accountName == StorageEmulatorAccountName {
-        return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
-    }
-    return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
deleted file mode 100644
index 22ddbcd65..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-    "errors"
-    "net/http"
-    "net/url"
-    "strconv"
-    "time"
-)
-
-// lease constants.
-const (
-    leaseHeaderPrefix = "x-ms-lease-"
-    headerLeaseID     = "x-ms-lease-id"
-    leaseAction       = "x-ms-lease-action"
-    leaseBreakPeriod  = "x-ms-lease-break-period"
-    leaseDuration     = "x-ms-lease-duration"
-    leaseProposedID   = "x-ms-proposed-lease-id"
-    leaseTime         = "x-ms-lease-time"
-
-    acquireLease = "acquire"
-    renewLease   = "renew"
-    changeLease  = "change"
-    releaseLease = "release"
-    breakLease   = "break"
-)
-
-// leaseCommonPut is common PUT code for the various acquire/release/break etc. functions.
-func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
-    params := url.Values{"comp": {"lease"}}
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-    resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-    if err != nil {
-        return nil, err
-    }
-    defer readAndCloseBody(resp.Body)
-
-    if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
-        return nil, err
-    }
-
-    return resp.Header, nil
-}
-
-// LeaseOptions includes options for all operations regarding leasing blobs
-type LeaseOptions struct {
-    Timeout           uint
-    Origin            string     `header:"Origin"`
-    IfMatch           string     `header:"If-Match"`
-    IfNoneMatch       string     `header:"If-None-Match"`
-    IfModifiedSince   *time.Time `header:"If-Modified-Since"`
-    IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
-    RequestID         string     `header:"x-ms-client-request-id"`
-}
-
-// AcquireLease creates a lease for a blob and returns the lease ID acquired.
-// In API versions starting with 2012-02-12, the minimum leaseTimeInSeconds is 15 and the maximum
-// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
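A sketch of the acquire/release cycle for the lease API described here (assumes a *storage.Blob from this package's container helpers; withLease is an illustrative name):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // withLease takes an infinite lease (-1), runs fn while holding it,
    // and releases the lease on the way out.
    func withLease(b *storage.Blob, fn func(leaseID string) error) error {
        leaseID, err := b.AcquireLease(-1, "", nil)
        if err != nil {
            return err
        }
        defer b.ReleaseLease(leaseID, nil)
        return fn(leaseID)
    }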
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = acquireLease
-
-    if leaseTimeInSeconds == -1 {
-        // Do nothing, but don't trigger the following clauses.
-    } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
-        leaseTimeInSeconds = 60
-    } else if leaseTimeInSeconds < 15 {
-        leaseTimeInSeconds = 15
-    }
-
-    headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
-
-    if proposedLeaseID != "" {
-        headers[leaseProposedID] = proposedLeaseID
-    }
-
-    respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
-    if err != nil {
-        return "", err
-    }
-
-    returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
-
-    if returnedLeaseID != "" {
-        return returnedLeaseID, nil
-    }
-
-    return "", errors.New("LeaseID not returned")
-}
-
-// BreakLease breaks the lease for a blob.
-// Returns the timeout remaining in the lease in seconds.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = breakLease
-    return b.breakLeaseCommon(headers, options)
-}
-
-// BreakLeaseWithBreakPeriod breaks the lease for a blob.
-// breakPeriodInSeconds determines how long to wait until a new lease can be created.
-// Returns the timeout remaining in the lease in seconds.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = breakLease
-    headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
-    return b.breakLeaseCommon(headers, options)
-}
-
-// breakLeaseCommon is common code for both versions of BreakLease (with and without break period)
-func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
-
-    respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
-    if err != nil {
-        return 0, err
-    }
-
-    breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
-    if breakTimeoutStr != "" {
-        breakTimeout, err = strconv.Atoi(breakTimeoutStr)
-        if err != nil {
-            return 0, err
-        }
-    }
-
-    return breakTimeout, nil
-}
-
-// ChangeLease changes a lease ID for a blob.
-// Returns the new lease ID acquired.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = changeLease
-    headers[headerLeaseID] = currentLeaseID
-    headers[leaseProposedID] = proposedLeaseID
-
-    respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
-    if err != nil {
-        return "", err
-    }
-
-    newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
-    if newLeaseID != "" {
-        return newLeaseID, nil
-    }
-
-    return "", errors.New("LeaseID not returned")
-}
-
-// ReleaseLease releases the lease for a blob.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = releaseLease
-    headers[headerLeaseID] = currentLeaseID
-
-    _, err := b.leaseCommonPut(headers, http.StatusOK, options)
-    if err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
-func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
-    headers := b.Container.bsc.client.getStandardHeaders()
-    headers[leaseAction] = renewLease
-    headers[headerLeaseID] = currentLeaseID
-
-    _, err := b.leaseCommonPut(headers, http.StatusOK, options)
-    if err != nil {
-        return err
-    }
-
-    return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
deleted file mode 100644
index ff13704ea..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-    "encoding/xml"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strconv"
-    "time"
-)
-
-// Message represents an Azure message.
-type Message struct {
-    Queue        *Queue
-    Text         string      `xml:"MessageText"`
-    ID           string      `xml:"MessageId"`
-    Insertion    TimeRFC1123 `xml:"InsertionTime"`
-    Expiration   TimeRFC1123 `xml:"ExpirationTime"`
-    PopReceipt   string      `xml:"PopReceipt"`
-    NextVisible  TimeRFC1123 `xml:"TimeNextVisible"`
-    DequeueCount int         `xml:"DequeueCount"`
-}
-
-func (m *Message) buildPath() string {
-    return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
-}
-
-// PutMessageOptions is the set of options that can be specified for the Put Message
-// operation. A zero struct does not use any preferences for the request.
-type PutMessageOptions struct {
-    Timeout           uint
-    VisibilityTimeout int
-    MessageTTL        int
-    RequestID         string `header:"x-ms-client-request-id"`
-}
-
-// Put operation adds a new message to the back of the message queue.
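A sketch of enqueueing through the Put API described here (assumes a *storage.Queue; enqueue is an illustrative name):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // enqueue posts text with a 7-day TTL; TTL and visibility values
    // are in seconds.
    func enqueue(q *storage.Queue, text string) error {
        msg := q.GetMessageReference(text)
        return msg.Put(&storage.PutMessageOptions{MessageTTL: 7 * 24 * 60 * 60})
    }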
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
-func (m *Message) Put(options *PutMessageOptions) error {
-    query := url.Values{}
-    headers := m.Queue.qsc.client.getStandardHeaders()
-
-    req := putMessageRequest{MessageText: m.Text}
-    body, nn, err := xmlMarshal(req)
-    if err != nil {
-        return err
-    }
-    headers["Content-Length"] = strconv.Itoa(nn)
-
-    if options != nil {
-        if options.VisibilityTimeout != 0 {
-            query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
-        }
-        if options.MessageTTL != 0 {
-            query.Set("messagettl", strconv.Itoa(options.MessageTTL))
-        }
-        query = addTimeout(query, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-
-    uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
-    resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-    err = checkRespCode(resp, []int{http.StatusCreated})
-    if err != nil {
-        return err
-    }
-    err = xmlUnmarshal(resp.Body, m)
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-// UpdateMessageOptions is the set of options that can be specified for the Update Message
-// operation. A zero struct does not use any preferences for the request.
-type UpdateMessageOptions struct {
-    Timeout           uint
-    VisibilityTimeout int
-    RequestID         string `header:"x-ms-client-request-id"`
-}
-
-// Update operation updates the specified message.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
-func (m *Message) Update(options *UpdateMessageOptions) error {
-    query := url.Values{}
-    if m.PopReceipt != "" {
-        query.Set("popreceipt", m.PopReceipt)
-    }
-
-    headers := m.Queue.qsc.client.getStandardHeaders()
-    req := putMessageRequest{MessageText: m.Text}
-    body, nn, err := xmlMarshal(req)
-    if err != nil {
-        return err
-    }
-    headers["Content-Length"] = strconv.Itoa(nn)
-
-    if options != nil {
-        if options.VisibilityTimeout != 0 {
-            query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
-        }
-        query = addTimeout(query, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
-
-    resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-
-    m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
-    nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
-    if nextTimeStr != "" {
-        nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
-        if err != nil {
-            return err
-        }
-        m.NextVisible = TimeRFC1123(nextTime)
-    }
-
-    return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// Delete operation deletes the specified message.
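A sketch combining the Update and Delete operations here; it assumes msg came back from GetMessages so its PopReceipt is populated (extendThenDelete is an illustrative name):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // extendThenDelete hides the message for another 60s, then removes it.
    func extendThenDelete(msg *storage.Message) error {
        if err := msg.Update(&storage.UpdateMessageOptions{VisibilityTimeout: 60}); err != nil {
            return err
        }
        return msg.Delete(nil)
    }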
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
-func (m *Message) Delete(options *QueueServiceOptions) error {
-    params := url.Values{"popreceipt": {m.PopReceipt}}
-    headers := m.Queue.qsc.client.getStandardHeaders()
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
-
-    resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-    return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-type putMessageRequest struct {
-    XMLName     xml.Name `xml:"QueueMessage"`
-    MessageText string   `xml:"MessageText"`
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
deleted file mode 100644
index 800adf129..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// MetadataLevel determines if operations should return a payload,
-// and its level of detail.
-type MetadataLevel string
-
-// These consts are meant to help with OData-supported operations
-const (
-    OdataTypeSuffix = "@odata.type"
-
-    // Types
-
-    OdataBinary   = "Edm.Binary"
-    OdataDateTime = "Edm.DateTime"
-    OdataGUID     = "Edm.Guid"
-    OdataInt64    = "Edm.Int64"
-
-    // Query options
-
-    OdataFilter  = "$filter"
-    OdataOrderBy = "$orderby"
-    OdataTop     = "$top"
-    OdataSkip    = "$skip"
-    OdataCount   = "$count"
-    OdataExpand  = "$expand"
-    OdataSelect  = "$select"
-    OdataSearch  = "$search"
-
-    EmptyPayload    MetadataLevel = ""
-    NoMetadata      MetadataLevel = "application/json;odata=nometadata"
-    MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
-    FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
deleted file mode 100644
index 007e19e61..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
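The OData constants deleted above are plain strings; a sketch of using the query-option constants to build request parameters (the filter expression is only an illustration):

    import (
        "net/url"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // buildODataQuery asks for the top 10 entities matching a filter.
    func buildODataQuery() url.Values {
        v := url.Values{}
        v.Set(storage.OdataFilter, "Age gt 30")
        v.Set(storage.OdataTop, "10")
        return v
    }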
-
-import (
-    "encoding/xml"
-    "errors"
-    "fmt"
-    "io"
-    "net/http"
-    "net/url"
-    "time"
-)
-
-// GetPageRangesResponse contains the response fields from
-// Get Page Ranges call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
-type GetPageRangesResponse struct {
-    XMLName  xml.Name    `xml:"PageList"`
-    PageList []PageRange `xml:"PageRange"`
-}
-
-// PageRange contains information about a page of a page blob from
-// Get Pages Range call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
-type PageRange struct {
-    Start int64 `xml:"Start"`
-    End   int64 `xml:"End"`
-}
-
-var (
-    errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
-    errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
-)
-
-// PutPageOptions includes the options for a put page operation
-type PutPageOptions struct {
-    Timeout                           uint
-    LeaseID                           string     `header:"x-ms-lease-id"`
-    IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
-    IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
-    IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
-    IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
-    IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
-    IfMatch                           string     `header:"If-Match"`
-    IfNoneMatch                       string     `header:"If-None-Match"`
-    RequestID                         string     `header:"x-ms-client-request-id"`
-}
-
-// WriteRange writes a range of pages to a page blob.
-// Ranges must be aligned with 512-byte boundaries and the chunk size must be
-// a multiple of 512.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
-func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
-    if bytes == nil {
-        return errors.New("bytes cannot be nil")
-    }
-    return b.modifyRange(blobRange, bytes, options)
-}
-
-// ClearRange clears the given range in a page blob.
-// Ranges must be aligned with 512-byte boundaries and the chunk size must be
-// a multiple of 512.
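A sketch of satisfying the alignment rules that modifyRange (below) enforces for page blobs (writePages is an illustrative name; assumes a *storage.Blob):

    import (
        "bytes"
        "errors"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // writePages uploads data at offset; both the offset and the length
    // must be multiples of 512 so End lands one byte short of a boundary.
    func writePages(b *storage.Blob, offset uint64, data []byte) error {
        if offset%512 != 0 || len(data) == 0 || len(data)%512 != 0 {
            return errors.New("offset and len(data) must be 512-byte aligned")
        }
        r := storage.BlobRange{Start: offset, End: offset + uint64(len(data)) - 1}
        return b.WriteRange(r, bytes.NewReader(data), nil)
    }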
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page -func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { - return b.modifyRange(blobRange, nil, options) -} - -func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { - if blobRange.End < blobRange.Start { - return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if blobRange.Start%512 != 0 { - return errors.New("the value for rangeStart must be a multiple of 512") - } - if blobRange.End%512 != 511 { - return errors.New("the value for rangeEnd must be a multiple of 512 - 1") - } - - params := url.Values{"comp": {"page"}} - - // default to clear - write := "clear" - var cl uint64 - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (blobRange.End - blobRange.Start) + 1 - } - - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = write - headers["x-ms-range"] = blobRange.String() - headers["Content-Length"] = fmt.Sprintf("%v", cl) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// GetPageRangesOptions includes the options for a get page ranges operation -type GetPageRangesOptions struct { - Timeout uint - Snapshot *time.Time - PreviousSnapshot *time.Time - Range *BlobRange - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges -func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { - params := url.Values{"comp": {"pagelist"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - if options.PreviousSnapshot != nil { - params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) - } - if options.Range != nil { - headers["Range"] = options.Range.String() - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out GetPageRangesResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer readAndCloseBody(resp.Body) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See CreateBlockBlobFromReader for more info on creating blobs. 
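A sketch of initializing a page blob and then asking which ranges hold data, using the PutPageBlob and GetPageRanges APIs above (newPageBlob is illustrative; ContentLength is assumed to be the int64 size field on the blob's properties):

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // newPageBlob creates an empty page blob of a 512-aligned size and
    // prints its valid ranges (none, right after creation).
    func newPageBlob(b *storage.Blob, size int64) error {
        b.Properties.ContentLength = size
        if err := b.PutPageBlob(nil); err != nil {
            return err
        }
        resp, err := b.GetPageRanges(nil)
        if err != nil {
            return err
        }
        for _, pr := range resp.PageList {
            fmt.Printf("valid range %d-%d\n", pr.Start, pr.End)
        }
        return nil
    }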
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutPageBlob(options *PutBlobOptions) error { - if b.Properties.ContentLength%512 != 0 { - return errors.New("Content length must be aligned to a 512-byte boundary") - } - - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) - headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypePage) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go deleted file mode 100644 index 9a821c56a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ /dev/null @@ -1,436 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "time" -) - -const ( - // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" -) - -// QueueAccessPolicy represents each access policy in the queue ACL. -type QueueAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAdd bool - CanUpdate bool - CanProcess bool -} - -// QueuePermissions represents the queue ACLs. -type QueuePermissions struct { - AccessPolicies []QueueAccessPolicy -} - -// SetQueuePermissionOptions includes options for a set queue permissions operation -type SetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Queue represents an Azure queue. -type Queue struct { - qsc *QueueServiceClient - Name string - Metadata map[string]string - AproxMessageCount uint64 -} - -func (q *Queue) buildPath() string { - return fmt.Sprintf("/%s", q.Name) -} - -func (q *Queue) buildPathMessages() string { - return fmt.Sprintf("%s/messages", q.buildPath()) -} - -// QueueServiceOptions includes options for some queue service operations -type QueueServiceOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Create operation creates a queue under the given account. 
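A sketch of the queue Create operation described here, with metadata attached before the call so it is sent with the request (createQueue is an illustrative name):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // createQueue provisions a queue carrying one user-defined metadata pair.
    func createQueue(qsc *storage.QueueServiceClient, name string) (*storage.Queue, error) {
        q := qsc.GetQueueReference(name)
        q.Metadata = map[string]string{"team": "ingest"}
        if err := q.Create(nil); err != nil {
            return nil, err
        }
        return q, nil
    }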
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
-func (q *Queue) Create(options *QueueServiceOptions) error {
-    params := url.Values{}
-    headers := q.qsc.client.getStandardHeaders()
-    headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
-    resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-    return checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// Delete operation permanently deletes the specified queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
-func (q *Queue) Delete(options *QueueServiceOptions) error {
-    params := url.Values{}
-    headers := q.qsc.client.getStandardHeaders()
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-    resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-    return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// Exists returns true if a queue with the given name exists.
-func (q *Queue) Exists() (bool, error) {
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
-    resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
-    if resp != nil {
-        defer readAndCloseBody(resp.Body)
-        if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
-            return resp.StatusCode == http.StatusOK, nil
-        }
-        err = getErrorFromResponse(resp)
-    }
-    return false, err
-}
-
-// SetMetadata operation sets user-defined metadata on the specified queue.
-// Metadata is associated with the queue as name-value pairs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
-func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
-    params := url.Values{"comp": {"metadata"}}
-    headers := q.qsc.client.getStandardHeaders()
-    headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
-    resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-    return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// GetMetadata operation retrieves user-defined metadata and queue
-// properties on the specified queue. Metadata is associated with
-// the queue as name-value pairs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Queue-Metadata
-//
-// Because of the way Golang's http client (and http.Header in particular)
-// canonicalizes header names, the returned metadata names are always
-// all lower case.
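A sketch of reading the approximate depth exposed by the GetMetadata operation just described (queueDepth is an illustrative name):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // queueDepth refreshes the queue's metadata and returns the service's
    // approximate message count.
    func queueDepth(q *storage.Queue) (uint64, error) {
        if err := q.GetMetadata(nil); err != nil {
            return 0, err
        }
        return q.AproxMessageCount, nil
    }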
-func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
-    params := url.Values{"comp": {"metadata"}}
-    headers := q.qsc.client.getStandardHeaders()
-
-    if options != nil {
-        params = addTimeout(params, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
-    resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
-    if err != nil {
-        return err
-    }
-    defer readAndCloseBody(resp.Body)
-
-    if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
-        return err
-    }
-
-    aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
-    if aproxMessagesStr != "" {
-        aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
-        if err != nil {
-            return err
-        }
-        q.AproxMessageCount = aproxMessages
-    }
-
-    q.Metadata = getMetadataFromHeaders(resp.Header)
-    return nil
-}
-
-// GetMessageReference returns a message object with the specified text.
-func (q *Queue) GetMessageReference(text string) *Message {
-    return &Message{
-        Queue: q,
-        Text:  text,
-    }
-}
-
-// GetMessagesOptions is the set of options that can be specified for the Get
-// Messages operation. A zero struct does not use any preferences for the
-// request.
-type GetMessagesOptions struct {
-    Timeout           uint
-    NumOfMessages     int
-    VisibilityTimeout int
-    RequestID         string `header:"x-ms-client-request-id"`
-}
-
-type messages struct {
-    XMLName  xml.Name  `xml:"QueueMessagesList"`
-    Messages []Message `xml:"QueueMessage"`
-}
-
-// GetMessages operation retrieves one or more messages from the front of the
-// queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
-func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
-    query := url.Values{}
-    headers := q.qsc.client.getStandardHeaders()
-
-    if options != nil {
-        if options.NumOfMessages != 0 {
-            query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
-        }
-        if options.VisibilityTimeout != 0 {
-            query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
-        }
-        query = addTimeout(query, options.Timeout)
-        headers = mergeHeaders(headers, headersFromStruct(*options))
-    }
-    uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
-
-    resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
-    if err != nil {
-        return []Message{}, err
-    }
-    defer resp.Body.Close()
-
-    var out messages
-    err = xmlUnmarshal(resp.Body, &out)
-    if err != nil {
-        return []Message{}, err
-    }
-    for i := range out.Messages {
-        out.Messages[i].Queue = q
-    }
-    return out.Messages, err
-}
-
-// PeekMessagesOptions is the set of options that can be specified for the Peek
-// Messages operation. A zero struct does not use any preferences for the
-// request.
-type PeekMessagesOptions struct {
-    Timeout       uint
-    NumOfMessages int
-    RequestID     string `header:"x-ms-client-request-id"`
-}
-
-// PeekMessages retrieves one or more messages from the front of the queue, but
-// does not alter the visibility of the message.
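A consumer-loop sketch over the GetMessages API above; failed messages are simply left to reappear after the visibility timeout (drain and handle are illustrative names):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // drain receives up to 32 messages at a time, hides them for 30s while
    // they are handled, and deletes each one that is processed successfully.
    func drain(q *storage.Queue, handle func(text string) error) error {
        for {
            msgs, err := q.GetMessages(&storage.GetMessagesOptions{
                NumOfMessages:     32,
                VisibilityTimeout: 30,
            })
            if err != nil {
                return err
            }
            if len(msgs) == 0 {
                return nil
            }
            for i := range msgs {
                if handle(msgs[i].Text) != nil {
                    continue // becomes visible again after the timeout
                }
                if err := msgs[i].Delete(nil); err != nil {
                    return err
                }
            }
        }
    }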
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages -func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) { - query := url.Values{"peekonly": {"true"}} // Required for peek operation - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - if options.NumOfMessages != 0 { - query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return []Message{}, err - } - defer resp.Body.Close() - - var out messages - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return []Message{}, err - } - for i := range out.Messages { - out.Messages[i].Queue = q - } - return out.Messages, err -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages -func (q *Queue) ClearMessages(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) - - resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// SetPermissions sets up queue permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl -func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { - body, length, err := generateQueueACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, qapd := range policies { - permission := qapd.generateQueuePermissions() - signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { - // generate the permissions string (raup). - // still want the end user API to have bool flags. 
- permissions = "" - - if qapd.CanRead { - permissions += "r" - } - - if qapd.CanAdd { - permissions += "a" - } - - if qapd.CanUpdate { - permissions += "u" - } - - if qapd.CanProcess { - permissions += "p" - } - - return permissions -} - -// GetQueuePermissionOptions includes options for a get queue permissions operation -type GetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl -// If timeout is 0 then it will not be passed to Azure -func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) { - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildQueueAccessPolicy(ap, &resp.Header), nil -} - -func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { - permissions := QueuePermissions{ - AccessPolicies: []QueueAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - qapd := QueueAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") - qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") - - permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) - } - return &permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go deleted file mode 100644 index 28d9ab937..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go +++ /dev/null @@ -1,146 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// QueueSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type QueueSASOptions struct { - QueueSASPermissions - SASOptions -} - -// QueueSASPermissions includes the available permissions for -// a queue SAS URI. 
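A sketch of installing a stored access policy through the SetPermissions API above (grantProcessor is an illustrative name):

    import (
        "time"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    // grantProcessor adds a one-day policy allowing reads and message
    // processing; the permission string becomes "rp" per the builder above.
    func grantProcessor(q *storage.Queue) error {
        now := time.Now().UTC()
        perms := storage.QueuePermissions{
            AccessPolicies: []storage.QueueAccessPolicy{{
                ID:         "processor",
                StartTime:  now,
                ExpiryTime: now.Add(24 * time.Hour),
                CanRead:    true,
                CanProcess: true,
            }},
        }
        return q.SetPermissions(perms, nil)
    }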
-type QueueSASPermissions struct { - Read bool - Add bool - Update bool - Process bool -} - -func (q QueueSASPermissions) buildString() string { - permissions := "" - - if q.Read { - permissions += "r" - } - if q.Add { - permissions += "a" - } - if q.Update { - permissions += "u" - } - if q.Process { - permissions += "p" - } - return permissions -} - -// GetSASURI creates an URL to the specified queue which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { - canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedStart := "" - if options.Start != (time.Time{}) { - signedStart = options.Start.UTC().Format(time.RFC3339) - } - signedExpiry := options.Expiry.UTC().Format(time.RFC3339) - - protocols := "https,http" - if options.UseHTTPS { - protocols = "https" - } - - permissions := options.QueueSASPermissions.buildString() - stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) - if err != nil { - return "", err - } - - sig := q.qsc.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {q.qsc.client.apiVersion}, - "se": {signedExpiry}, - "sp": {permissions}, - "sig": {sig}, - } - - if q.qsc.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - addQueryParameter(sasParams, "sip", options.IP) - } - - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/queue" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - signedPermissions, - signedStart, - signedExpiry, - canonicalizedResource, - signedIdentifier, - signedIP, - protocols, - signedVersion), nil - - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
deleted file mode 100644
index 29febe146..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// QueueServiceClient contains operations for Microsoft Azure Queue Storage
-// Service.
-type QueueServiceClient struct {
-    client Client
-    auth   authentication
-}
-
-// GetServiceProperties gets the properties of your storage account's queue service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
-func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
-    return q.client.getServiceProperties(queueServiceName, q.auth)
-}
-
-// SetServiceProperties sets the properties of your storage account's queue service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
-func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
-    return q.client.setServiceProperties(props, queueServiceName, q.auth)
-}
-
-// GetQueueReference returns a Queue object for the specified queue name.
-func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
-    return &Queue{
-        qsc:  q,
-        Name: name,
-    }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
deleted file mode 100644
index 0ded50107..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-    "fmt"
-    "net/http"
-    "net/url"
-    "strconv"
-)
-
-// Share represents an Azure file share.
-type Share struct {
-    fsc        *FileServiceClient
-    Name       string          `xml:"Name"`
-    Properties ShareProperties `xml:"Properties"`
-    Metadata   map[string]string
-}
-
-// ShareProperties contains various properties of a share.
-type ShareProperties struct {
-    LastModified string `xml:"Last-Modified"`
-    Etag         string `xml:"Etag"`
-    Quota        int    `xml:"Quota"`
-}
-
-// builds the complete path for this share object.
-func (s *Share) buildPath() string {
-    return fmt.Sprintf("/%s", s.Name)
-}
-
-// Create this share under the associated account.
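A sketch of creating a share with a quota via the Create operation below (newShare is an illustrative name; quota units are GiB):

    import "github.com/Azure/azure-sdk-for-go/storage"

    // newShare provisions a 10GiB share; quota must fall in [1, 5120].
    func newShare(fsc *storage.FileServiceClient, name string) (*storage.Share, error) {
        s := fsc.GetShareReference(name)
        s.Properties.Quota = 10
        if err := s.Create(nil); err != nil {
            return nil, err
        }
        return s, nil
    }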
-// If a share with the same name already exists, the operation fails. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) Create(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this share under the associated account if -// it does not exist. Returns true if the share is newly created or false if -// the share already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { - if resp.StatusCode == http.StatusCreated { - s.updateEtagAndLastModified(resp.Header) - return true, nil - } - return false, s.FetchAttributes(nil) - } - } - - return false, err -} - -// Delete marks this share for deletion. The share along with any files -// and directories contained within it are later deleted during garbage -// collection. If the share does not exist the operation fails -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) Delete(options *FileRequestOptions) error { - return s.fsc.deleteResource(s.buildPath(), resourceShare, options) -} - -// DeleteIfExists operation marks this share for deletion if it exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) - if resp != nil { - defer readAndCloseBody(resp.Body) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this share already exists -// on the storage account, otherwise returns false. -func (s *Share) Exists() (bool, error) { - exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) - if exists { - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata and properties for this share. 
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties -func (s *Share) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - s.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetRootDirectoryReference returns a Directory object at the root of this share. -func (s *Share) GetRootDirectoryReference() *Directory { - return &Directory{ - fsc: s.fsc, - share: s, - } -} - -// ServiceClient returns the FileServiceClient associated with this share. -func (s *Share) ServiceClient() *FileServiceClient { - return s.fsc -} - -// SetMetadata replaces the metadata for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata -func (s *Share) SetMetadata(options *FileRequestOptions) error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties -func (s *Share) SetProperties(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - if s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) - } - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (s *Share) updateEtagAndLastModified(headers http.Header) { - s.Properties.Etag = headers.Get("Etag") - s.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates quota value -func (s *Share) updateQuota(headers http.Header) { - quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) - if err == nil { - s.Properties.Quota = quota - } -} - -// URL gets the canonical URL to this share. This method does not create a publicly accessible -// URL if the share is private and this method does not check if the share exists. 
-func (s *Share) URL() string { - return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go deleted file mode 100644 index 056ab398a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ /dev/null @@ -1,61 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. -type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. -func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { - return SignedIdentifier{ - ID: id, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: startTime.UTC().Round(time.Second), - ExpiryTime: expiryTime.UTC().Round(time.Second), - Permission: permissions, - }, - } -} - -func updatePermissions(permissions, permission string) bool { - return strings.Contains(permissions, permission) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go deleted file mode 100644 index a68ad0930..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
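The storagepolicy.go types removed just above map stored access policies onto the XML payload the service expects. A self-contained sketch with local copies of those structs (the ID and times are placeholders), showing the document that xmlMarshal would produce:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// Local copies of the deleted storagepolicy.go types so the sketch compiles on its own.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	Permission string    `xml:"Permission"`
}

type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	start := time.Date(2030, 1, 1, 0, 0, 0, 0, time.UTC)
	// Same rounding as convertAccessPolicyToXMLStructs: UTC, whole seconds.
	sil := SignedIdentifiers{SignedIdentifiers: []SignedIdentifier{{
		ID: "policy-1",
		AccessPolicy: AccessPolicyDetailsXML{
			StartTime:  start.UTC().Round(time.Second),
			ExpiryTime: start.Add(24 * time.Hour).UTC().Round(time.Second),
			Permission: "raud",
		},
	}}}
	out, _ := xml.MarshalIndent(sil, "", "  ")
	fmt.Println(string(out))
}
```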
- -import ( - "net/http" - "net/url" - "strconv" -) - -// ServiceProperties represents the storage account service properties -type ServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors -} - -// Logging represents the Azure Analytics Logging settings -type Logging struct { - Version string - Delete bool - Read bool - Write bool - RetentionPolicy *RetentionPolicy -} - -// RetentionPolicy indicates if retention is enabled and for how many days -type RetentionPolicy struct { - Enabled bool - Days *int -} - -// Metrics provide request statistics. -type Metrics struct { - Version string - Enabled bool - IncludeAPIs *bool - RetentionPolicy *RetentionPolicy -} - -// Cors includes all the CORS rules -type Cors struct { - CorsRule []CorsRule -} - -// CorsRule includes all settings for a Cors rule -type CorsRule struct { - AllowedOrigins string - AllowedMethods string - MaxAgeInSeconds int - ExposedHeaders string - AllowedHeaders string -} - -func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - headers := c.getStandardHeaders() - - resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - var out ServiceProperties - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return nil, err - } - - return &out, nil -} - -func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - - // Ideally, StorageServiceProperties would be the output struct - // This is to avoid golint stuttering, while generating the correct XML - type StorageServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors - } - input := StorageServiceProperties{ - Logging: props.Logging, - HourMetrics: props.HourMetrics, - MinuteMetrics: props.MinuteMetrics, - Cors: props.Cors, - } - - body, length, err := xmlMarshal(input) - if err != nil { - return err - } - - headers := c.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := c.exec(http.MethodPut, uri, headers, body, auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - return checkRespCode(resp, []int{http.StatusAccepted}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go deleted file mode 100644 index b96ca6e12..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ /dev/null @@ -1,419 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
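The setServiceProperties helper removed above wraps the same fields in a StorageServiceProperties element and PUTs them to the endpoint with ?restype=service&comp=properties. A sketch of the XML payload it generates, using trimmed local copies of the types (the values are placeholders):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed local copies of the deleted types: just enough to render the payload.
type RetentionPolicy struct {
	Enabled bool
	Days    *int
}

type Logging struct {
	Version         string
	Delete          bool
	Read            bool
	Write           bool
	RetentionPolicy *RetentionPolicy
}

type StorageServiceProperties struct {
	Logging *Logging
}

func main() {
	days := 7
	props := StorageServiceProperties{
		Logging: &Logging{
			Version:         "1.0",
			Delete:          true,
			RetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days},
		},
	}
	b, _ := xml.MarshalIndent(props, "", "  ")
	// The request itself is: PUT <endpoint>?restype=service&comp=properties
	fmt.Println(string(b))
}
```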
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- tablesURIPath = "/Tables"
- nextTableQueryParameter = "NextTableName"
- headerNextPartitionKey = "x-ms-continuation-NextPartitionKey"
- headerNextRowKey = "x-ms-continuation-NextRowKey"
- nextPartitionKeyQueryParameter = "NextPartitionKey"
- nextRowKeyQueryParameter = "NextRowKey"
-)
-
-// TableAccessPolicy is used for SETTING table policies
-type TableAccessPolicy struct {
- ID string
- StartTime time.Time
- ExpiryTime time.Time
- CanRead bool
- CanAppend bool
- CanUpdate bool
- CanDelete bool
-}
-
-// Table represents an Azure table.
-type Table struct {
- tsc *TableServiceClient
- Name string `json:"TableName"`
- OdataEditLink string `json:"odata.editLink"`
- OdataID string `json:"odata.id"`
- OdataMetadata string `json:"odata.metadata"`
- OdataType string `json:"odata.type"`
-}
-
-// EntityQueryResult contains the response from
-// ExecuteQuery and ExecuteQueryNextResults functions.
-type EntityQueryResult struct {
- OdataMetadata string `json:"odata.metadata"`
- Entities []*Entity `json:"value"`
- QueryNextLink
- table *Table
-}
-
-type continuationToken struct {
- NextPartitionKey string
- NextRowKey string
-}
-
-func (t *Table) buildPath() string {
- return fmt.Sprintf("/%s", t.Name)
-}
-
-func (t *Table) buildSpecificPath() string {
- return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
-}
-
-// Get gets the referenced table.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
-func (t *Table) Get(timeout uint, ml MetadataLevel) error {
- if ml == EmptyPayload {
- return errEmptyPayload
- }
-
- query := url.Values{
- "timeout": {strconv.FormatUint(uint64(timeout), 10)},
- }
- headers := t.tsc.client.getStandardHeaders()
- headers[headerAccept] = string(ml)
-
- uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
- resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- respBody, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
- err = json.Unmarshal(respBody, t)
- if err != nil {
- return err
- }
- return nil
-}
-
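A typical call sequence against this (now removed) table API: build a client, get a table reference, then Get with a metadata level. This sketch compiles against the old vendored package; the account name and key are mock values, and FullMetadata simply asks the service to echo back the odata annotations that Get unmarshals into the Table struct:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage" // the package removed by this diff
)

func main() {
	// Mock credentials; NewBasicClient expects a base64-encoded account key.
	client, err := storage.NewBasicClient("myaccount", "bW9jaw==")
	if err != nil {
		log.Fatal(err)
	}
	ts := client.GetTableService()
	table := ts.GetTableReference("mytable")

	// 30-second timeout; FullMetadata populates OdataEditLink, OdataID, etc.
	if err := table.Get(30, storage.FullMetadata); err != nil {
		log.Fatal(err)
	}
	fmt.Println(table.Name, table.OdataEditLink)
}
```

-// Create creates the referenced table.
-// This function fails if the name is not compliant
-// with the specification or the table already exists.
-// ml determines the level of detail of metadata in the operation response,
-// or no data at all.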
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table -func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - }) - - type createTableRequest struct { - TableName string `json:"TableName"` - } - req := createTableRequest{TableName: t.Name} - buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(req); err != nil { - return err - } - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, ml) - headers = addBodyRelatedHeaders(headers, buf.Len()) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) - if err != nil { - return err - } - defer resp.Body.Close() - - if ml == EmptyPayload { - if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - } else { - if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return err - } - } - - if ml != EmptyPayload { - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - err = json.Unmarshal(data, t) - if err != nil { - return err - } - } - - return nil -} - -// Delete deletes the referenced table. -// This function fails if the table is not present. -// Be advised: Delete deletes all the entries that may be present. -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table -func (t *Table) Delete(timeout uint, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ - "timeout": {strconv.Itoa(int(timeout))}, - }) - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, EmptyPayload) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// QueryOptions includes options for a query entities operation. -// Top, filter and select are OData query options. -type QueryOptions struct { - Top uint - Filter string - Select []string - RequestID string -} - -func (options *QueryOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - if len(options.Select) > 0 { - query.Add(OdataSelect, strings.Join(options.Select, ",")) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryEntities returns the entities in the table. -// You can use query options defined by the OData Protocol specification. 
-// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - query, headers := options.getParameters() - query = addTimeout(query, timeout) - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) - return t.queryEntities(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryEntities or NextResults operation. -// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { - if eqr == nil { - return nil, errNilPreviousResult - } - if eqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) -} - -// SetPermissions sets up table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL -func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - body, length, err := generateTableACLPayload(tap) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.Body) - - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, tap := range policies { - permission := generateTablePermissions(&tap) - signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -// GetPermissions gets the table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl -func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return updateTableAccessPolicy(ap), nil -} - -func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { - headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) - if ml != EmptyPayload { - headers[headerAccept] = string(ml) 
- } - - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var entities EntityQueryResult - err = json.Unmarshal(data, &entities) - if err != nil { - return nil, err - } - - for i := range entities.Entities { - entities.Entities[i].Table = t - } - entities.table = t - - contToken := extractContinuationTokenFromHeaders(resp.Header) - if contToken == nil { - entities.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) - v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) - newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) - entities.NextLink = &newURI - entities.ml = ml - } - - return &entities, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { - ct := continuationToken{ - NextPartitionKey: h.Get(headerNextPartitionKey), - NextRowKey: h.Get(headerNextRowKey), - } - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} - -func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - taps := []TableAccessPolicy{} - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - tap := TableAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") - tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - taps = append(taps, tap) - } - return taps -} - -func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { - // generate the permissions string (raud). - // still want the end user API to have bool flags. - permissions = "" - - if tap.CanRead { - permissions += "r" - } - - if tap.CanAppend { - permissions += "a" - } - - if tap.CanUpdate { - permissions += "u" - } - - if tap.CanDelete { - permissions += "d" - } - return permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go deleted file mode 100644 index 6595fb70f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ /dev/null @@ -1,328 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "mime/multipart" - "net/http" - "net/textproto" - "sort" - "strings" - - "github.com/marstr/guid" -) - -// Operation type. Insert, Delete, Replace etc. 
-type Operation int - -// consts for batch operations. -const ( - InsertOp = Operation(1) - DeleteOp = Operation(2) - ReplaceOp = Operation(3) - MergeOp = Operation(4) - InsertOrReplaceOp = Operation(5) - InsertOrMergeOp = Operation(6) -) - -// BatchEntity used for tracking Entities to operate on and -// whether operations (replace/merge etc) should be forced. -// Wrapper for regular Entity with additional data specific for the entity. -type BatchEntity struct { - *Entity - Force bool - Op Operation -} - -// TableBatch stores all the entities that will be operated on during a batch process. -// Entities can be inserted, replaced or deleted. -type TableBatch struct { - BatchEntitySlice []BatchEntity - - // reference to table we're operating on. - Table *Table -} - -// defaultChangesetHeaders for changeSets -var defaultChangesetHeaders = map[string]string{ - "Accept": "application/json;odata=minimalmetadata", - "Content-Type": "application/json", - "Prefer": "return-no-content", -} - -// NewBatch return new TableBatch for populating. -func (t *Table) NewBatch() *TableBatch { - return &TableBatch{ - Table: t, - } -} - -// InsertEntity adds an entity in preparation for a batch insert. -func (t *TableBatch) InsertEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace. -func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag -func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) { - t.InsertOrReplaceEntity(entity, true) -} - -// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge. -func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag -func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) { - t.InsertOrMergeEntity(entity, true) -} - -// ReplaceEntity adds an entity in preparation for a batch replace. -func (t *TableBatch) ReplaceEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// DeleteEntity adds an entity in preparation for a batch delete -func (t *TableBatch) DeleteEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag -func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) { - t.DeleteEntity(entity, true) -} - -// MergeEntity adds an entity in preparation for a batch merge -func (t *TableBatch) MergeEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: MergeOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// ExecuteBatch executes many table operations in one request to Azure. 
-// The operations can be combinations of Insert, Delete, Replace and Merge.
-// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
-// the changesets.
-// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
-func (t *TableBatch) ExecuteBatch() error {
-
- // Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947).
- id, err := guid.NewGUIDs(guid.CreationStrategyVersion1)
- if err != nil {
- return err
- }
-
- changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
- uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
- changesetBody, err := t.generateChangesetBody(changesetBoundary)
- if err != nil {
- return err
- }
-
- id, err = guid.NewGUIDs(guid.CreationStrategyVersion1)
- if err != nil {
- return err
- }
-
- boundary := fmt.Sprintf("batch_%s", id.String())
- body, err := generateBody(changesetBody, changesetBoundary, boundary)
- if err != nil {
- return err
- }
-
- headers := t.Table.tsc.client.getStandardHeaders()
- headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
-
- resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
- if err != nil {
- return err
- }
- defer readAndCloseBody(resp.resp.Body)
-
- if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
-
- // check which batch operation failed.
- operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
- requestID, date, version := getDebugHeaders(resp.resp.Header)
- return AzureStorageServiceError{
- StatusCode: resp.resp.StatusCode,
- Code: resp.odata.Err.Code,
- RequestID: requestID,
- Date: date,
- APIVersion: version,
- Message: operationFailedMessage,
- }
- }
-
- return nil
-}
-
-// getFailedOperation parses the original Azure error string, determines which operation failed
-// and generates an appropriate message.
-func (t *TableBatch) getFailedOperation(errorMessage string) string {
- // errorMessage consists of "number:string"; we just need the number.
- sp := strings.Split(errorMessage, ":")
- if len(sp) > 1 {
- msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
- return msg
- }
-
- // can't parse the message, just return the original message to the client
- return errorMessage
-}
-
-// generateBody generates the complete body for the batch request.
-func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
-
- body := new(bytes.Buffer)
- writer := multipart.NewWriter(body)
- writer.SetBoundary(boundary)
- h := make(textproto.MIMEHeader)
- h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
- batchWriter, err := writer.CreatePart(h)
- if err != nil {
- return nil, err
- }
- batchWriter.Write(changeSetBody.Bytes())
- writer.Close()
- return body, nil
-}
-
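generateBody above nests one multipart writer inside another: the outer multipart/mixed body carries a single part whose content is the changeset, itself a multipart body with its own boundary. A stripped-down, runnable version of that shape, with fixed boundaries and a placeholder operation for clarity:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

func main() {
	const csBoundary = "changeset_11111111-1111-1111-1111-111111111111"
	const batchBoundary = "batch_22222222-2222-2222-2222-222222222222"

	// Inner changeset: one part per table operation, each an embedded HTTP request.
	changeset := new(bytes.Buffer)
	cw := multipart.NewWriter(changeset)
	cw.SetBoundary(csBoundary)
	h := make(textproto.MIMEHeader)
	h.Set("Content-Type", "application/http")
	h.Set("Content-Transfer-Encoding", "binary")
	part, _ := cw.CreatePart(h)
	part.Write([]byte("POST https://myaccount.table.core.windows.net/mytable HTTP/1.1\r\n" +
		"Content-Type: application/json\r\n\r\n" +
		`{"PartitionKey":"pk","RowKey":"rk"}`))
	cw.Close()

	// Outer envelope: a single multipart/mixed part holding the whole changeset.
	body := new(bytes.Buffer)
	w := multipart.NewWriter(body)
	w.SetBoundary(batchBoundary)
	oh := make(textproto.MIMEHeader)
	oh.Set("Content-Type", fmt.Sprintf("multipart/mixed; boundary=%s", csBoundary))
	bw, _ := w.CreatePart(oh)
	bw.Write(changeset.Bytes())
	w.Close()

	fmt.Println(body.String())
}
```

-// generateChangesetBody generates the individual changesets for the various operations within the batch request.
-// There is a changeset for Insert, Delete, Merge etc.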
-func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) { - - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - writer.SetBoundary(changesetBoundary) - - for _, be := range t.BatchEntitySlice { - t.generateEntitySubset(&be, writer) - } - - writer.Close() - return body, nil -} - -// generateVerb generates the HTTP request VERB required for each changeset. -func generateVerb(op Operation) (string, error) { - switch op { - case InsertOp: - return http.MethodPost, nil - case DeleteOp: - return http.MethodDelete, nil - case ReplaceOp, InsertOrReplaceOp: - return http.MethodPut, nil - case MergeOp, InsertOrMergeOp: - return "MERGE", nil - default: - return "", errors.New("Unable to detect operation") - } -} - -// generateQueryPath generates the query path for within the changesets -// For inserts it will just be a table query path (table name) -// but for other operations (modifying an existing entity) then -// the partition/row keys need to be generated. -func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { - if op == InsertOp { - return entity.Table.buildPath() - } - return entity.buildPath() -} - -// generateGenericOperationHeaders generates common headers for a given operation. -func generateGenericOperationHeaders(be *BatchEntity) map[string]string { - retval := map[string]string{} - - for k, v := range defaultChangesetHeaders { - retval[k] = v - } - - if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { - if be.Force || be.Entity.OdataEtag == "" { - retval["If-Match"] = "*" - } else { - retval["If-Match"] = be.Entity.OdataEtag - } - } - - return retval -} - -// generateEntitySubset generates body payload for particular batch entity -func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { - - h := make(textproto.MIMEHeader) - h.Set(headerContentType, "application/http") - h.Set(headerContentTransferEncoding, "binary") - - verb, err := generateVerb(batchEntity.Op) - if err != nil { - return err - } - - genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) - queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) - uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) - - operationWriter, err := writer.CreatePart(h) - if err != nil { - return err - } - - urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri) - operationWriter.Write([]byte(urlAndVerb)) - writeHeaders(genericOpHeadersMap, &operationWriter) - operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. - - // delete operation doesn't need a body. 
- if batchEntity.Op != DeleteOp { - //var e Entity = batchEntity.Entity - body, err := json.Marshal(batchEntity.Entity) - if err != nil { - return err - } - operationWriter.Write(body) - } - - return nil -} - -func writeHeaders(h map[string]string, writer *io.Writer) { - // This way it is guaranteed the headers will be written in a sorted order - var keys []string - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go deleted file mode 100644 index 1f063a395..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ /dev/null @@ -1,204 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -const ( - headerAccept = "Accept" - headerEtag = "Etag" - headerPrefer = "Prefer" - headerXmsContinuation = "x-ms-Continuation-NextTableName" -) - -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client - auth authentication -} - -// TableOptions includes options for some table operations -type TableOptions struct { - RequestID string -} - -func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { - if options != nil { - h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) - } - return h -} - -// QueryNextLink includes information for getting the next page of -// results in query operations -type QueryNextLink struct { - NextLink *string - ml MetadataLevel -} - -// GetServiceProperties gets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties -func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return t.client.getServiceProperties(tableServiceName, t.auth) -} - -// SetServiceProperties sets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties -func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { - return t.client.setServiceProperties(props, tableServiceName, t.auth) -} - -// GetTableReference returns a Table object for the specified table name. 
-func (t *TableServiceClient) GetTableReference(name string) *Table { - return &Table{ - tsc: t, - Name: name, - } -} - -// QueryTablesOptions includes options for some table operations -type QueryTablesOptions struct { - Top uint - Filter string - RequestID string -} - -func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryTables returns the tables in the storage account. -// You can use query options defined by the OData Protocol specification. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables -func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { - query, headers := options.getParameters() - uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) - return t.queryTables(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryTables or a NextResults operation. -// -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { - if tqr == nil { - return nil, errNilPreviousResult - } - if tqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - - return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) -} - -// TableQueryResult contains the response from -// QueryTables and QueryTablesNextResults functions. 
-type TableQueryResult struct { - OdataMetadata string `json:"odata.metadata"` - Tables []Table `json:"value"` - QueryNextLink - tsc *TableServiceClient -} - -func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - headers = mergeHeaders(headers, t.client.getStandardHeaders()) - headers[headerAccept] = string(ml) - - resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var out TableQueryResult - err = json.Unmarshal(respBody, &out) - if err != nil { - return nil, err - } - - for i := range out.Tables { - out.Tables[i].tsc = t - } - out.tsc = t - - nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation)) - if nextLink == "" { - out.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextTableQueryParameter, nextLink) - newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) - out.NextLink = &newURI - out.ml = ml - } - - return &out, nil -} - -func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { - h[headerContentType] = "application/json" - h[headerContentLength] = fmt.Sprintf("%v", length) - h[headerAcceptCharset] = "UTF-8" - return h -} - -func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { - if ml != EmptyPayload { - h[headerPrefer] = "return-content" - h[headerAccept] = string(ml) - } else { - h[headerPrefer] = "return-no-content" - // From API version 2015-12-11 onwards, Accept header is required - h[headerAccept] = string(NoMetadata) - } - return h -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go deleted file mode 100644 index e8a5dcf8c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ /dev/null @@ -1,244 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
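The continuation-token plumbing above (the x-ms-Continuation-NextTableName header round-tripped into the NextTableName query parameter) is what drives the NextLink/NextResults pagination loop. A usage sketch against the removed API, with mock credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage" // the package removed by this diff
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bW9jaw==") // mock key
	if err != nil {
		log.Fatal(err)
	}
	ts := client.GetTableService()

	// First page; NextLink stays non-nil while the service keeps returning
	// a continuation header, so this loop walks every page.
	result, err := ts.QueryTables(storage.MinimalMetadata, nil)
	for err == nil && result != nil {
		for _, t := range result.Tables {
			fmt.Println(t.Name)
		}
		if result.NextLink == nil {
			break
		}
		result, err = result.NextResults(nil)
	}
	if err != nil {
		log.Fatal(err)
	}
}
```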
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
- accountSASOptions = AccountSASTokenOptions{
- Services: Services{
- Blob: true,
- },
- ResourceTypes: ResourceTypes{
- Service: true,
- Container: true,
- Object: true,
- },
- Permissions: Permissions{
- Read: true,
- Write: true,
- Delete: true,
- List: true,
- Add: true,
- Create: true,
- Update: true,
- Process: true,
- },
- Expiry: fixedTime,
- UseHTTPS: true,
- }
-)
-
-func (c Client) computeHmac256(message string) string {
- h := hmac.New(sha256.New, c.accountKey)
- h.Write([]byte(message))
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-func currentTimeRfc1123Formatted() string {
- return timeRfc1123Formatted(time.Now().UTC())
-}
-
-func timeRfc1123Formatted(t time.Time) string {
- return t.Format(http.TimeFormat)
-}
-
-func timeRFC3339Formatted(t time.Time) string {
- return t.Format("2006-01-02T15:04:05.0000000Z")
-}
-
-func mergeParams(v1, v2 url.Values) url.Values {
- out := url.Values{}
- for k, v := range v1 {
- out[k] = v
- }
- for k, v := range v2 {
- vals, ok := out[k]
- if ok {
- vals = append(vals, v...)
- out[k] = vals
- } else {
- out[k] = v
- }
- }
- return out
-}
-
-func prepareBlockListRequest(blocks []Block) string {
- s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
- for _, v := range blocks {
- s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
- }
- s += `</BlockList>`
- return s
-}
-
-func xmlUnmarshal(body io.Reader, v interface{}) error {
- data, err := ioutil.ReadAll(body)
- if err != nil {
- return err
- }
- return xml.Unmarshal(data, v)
-}
-
-func xmlMarshal(v interface{}) (io.Reader, int, error) {
- b, err := xml.Marshal(v)
- if err != nil {
- return nil, 0, err
- }
- return bytes.NewReader(b), len(b), nil
-}
-
-func headersFromStruct(v interface{}) map[string]string {
- headers := make(map[string]string)
- value := reflect.ValueOf(v)
- for i := 0; i < value.NumField(); i++ {
- key := value.Type().Field(i).Tag.Get("header")
- if key != "" {
- reflectedValue := reflect.Indirect(value.Field(i))
- var val string
- if reflectedValue.IsValid() {
- switch reflectedValue.Type() {
- case reflect.TypeOf(fixedTime):
- val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
- case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
- val = strconv.FormatUint(reflectedValue.Uint(), 10)
- case reflect.TypeOf(int(0)):
- val = strconv.FormatInt(reflectedValue.Int(), 10)
- default:
- val = reflectedValue.String()
- }
- }
- if val != "" {
- headers[key] = val
- }
- }
- }
- return headers
-}
-
-// merges extraHeaders into headers and returns headers
-func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
- for k, v := range extraHeaders {
- headers[k] = v
- }
- return headers
-}
-
-func addToHeaders(h map[string]string, key, value string) map[string]string {
- if value != "" {
- h[key] = value
- }
- return h
-}
-
-func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
- if value != nil {
- h = addToHeaders(h, key, timeRfc1123Formatted(*value))
- }
- return h
-}
-
-func addTimeout(params url.Values, timeout uint) url.Values {
- if timeout > 0 {
- params.Add("timeout", fmt.Sprintf("%v", timeout))
- }
- return params
-}
-
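The block-list XML built by prepareBlockListRequest above (whose angle brackets had been stripped in transit and are restored here) wraps each block ID in an element named after its status. A self-contained copy that prints the exact payload; Block and BlockStatus are local stand-ins for the package's types:

```go
package main

import "fmt"

type BlockStatus string

type Block struct {
	ID     string      // base64-encoded block ID
	Status BlockStatus // Committed, Uncommitted or Latest
}

// Same shape as the prepareBlockListRequest helper above.
func prepareBlockListRequest(blocks []Block) string {
	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
	for _, v := range blocks {
		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
	}
	s += `</BlockList>`
	return s
}

func main() {
	fmt.Println(prepareBlockListRequest([]Block{
		{ID: "QUFBQQ==", Status: "Latest"},
		{ID: "QkJCQg==", Status: "Committed"},
	}))
}
```

-func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
- if snapshot != nil {
- params.Add("snapshot",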
timeRFC3339Formatted(*snapshot)) - } - return params -} - -func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { - var out time.Time - var err error - outStr := h.Get(key) - if outStr != "" { - out, err = time.Parse(time.RFC1123, outStr) - if err != nil { - return nil, err - } - } - return &out, nil -} - -// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling -type TimeRFC1123 time.Time - -// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. -func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var value string - d.DecodeElement(&value, &start) - parse, err := time.Parse(time.RFC1123, value) - if err != nil { - return err - } - *t = TimeRFC1123(parse) - return nil -} - -// MarshalXML marshals using time.RFC1123. -func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start) -} - -// returns a map of custom metadata values from the specified HTTP header -func getMetadataFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - if len(metadata) == 0 { - return nil - } - - return metadata -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go deleted file mode 100644 index 2d8f2ab9f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package version - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// Number contains the semantic version of this SDK. -const Number = "v14.6.0" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/LICENSE b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
new file mode 100644
index 000000000..25fe68422
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
@@ -0,0 +1,65 @@
+package azblob
+
+import (
+ "time"
+)
+
+// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
+type ModifiedAccessConditions struct {
+ IfModifiedSince time.Time
+ IfUnmodifiedSince time.Time
+ IfMatch ETag
+ IfNoneMatch ETag
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
+ if !ac.IfModifiedSince.IsZero() {
+ ims = &ac.IfModifiedSince
+ }
+ if !ac.IfUnmodifiedSince.IsZero() {
+ ius = &ac.IfUnmodifiedSince
+ }
+ if ac.IfMatch != ETagNone {
+ ime = &ac.IfMatch
+ }
+ if ac.IfNoneMatch != ETagNone {
+ inme = &ac.IfNoneMatch
+ }
+ return
+}
+
+// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
+type ContainerAccessConditions struct {
+ ModifiedAccessConditions
+ LeaseAccessConditions
+}
+
+// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
+type BlobAccessConditions struct {
+ ModifiedAccessConditions
+ LeaseAccessConditions
+}
+
+// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
+type LeaseAccessConditions struct {
+ LeaseID string
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac LeaseAccessConditions) pointers() (leaseID *string) {
+ if ac.LeaseID != "" {
+ leaseID = &ac.LeaseID
+ }
+ return
+}
+
+/*
+// getInt32 is for internal infrastructure. It is used with access condition values where
+// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
+// and the privately-stored field in the access condition object is stored as +1 higher than desired.
+// This method returns true if the value is > 0 (explicitly set) and the stored value - 1 (the set desired value).
+func getInt32(value int32) (bool, int32) {
+ return value > 0, value - 1
+}
+*/
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go
new file mode 100644
index 000000000..9e18a7943
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go
@@ -0,0 +1,69 @@
+package azblob
+
+import "sync/atomic"
+
+// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
+// The AtomicMorpher callback is passed a startValue and based on this value it returns
+// what the new value should be and the result that AtomicMorph should return to its caller.
+type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
+
+const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
+
+// AtomicMorph atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
+func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
+ for {
+ currentVal := atomic.LoadInt32(target)
+ desiredVal, morphResult := morpher(currentVal)
+ if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
+ return morphResult
+ }
+ }
+}
+
+// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
+// The AtomicMorpher callback is passed a startValue and based on this value it returns
+// what the new value should be and the result that AtomicMorph should return to its caller.
+type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
+
+// AtomicMorph atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
+func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
+ for {
+ currentVal := atomic.LoadUint32(target)
+ desiredVal, morphResult := morpher(currentVal)
+ if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
+ return morphResult
+ }
+ }
+}
+
+// AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function.
+// The AtomicMorpher callback is passed a startValue and based on this value it returns
+// what the new value should be and the result that AtomicMorph should return to its caller.
+type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
+
+// AtomicMorph atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
+func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
+ for {
+ currentVal := atomic.LoadInt64(target)
+ desiredVal, morphResult := morpher(currentVal)
+ if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
+ return morphResult
+ }
+ }
+}
+
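Each atomicMorph* helper above is the classic load/compute/compare-and-swap retry loop: read the current value, derive a desired value plus a result, and retry until no other goroutine raced in between. A self-contained sketch of what that buys you, here a saturating counter that 100 goroutines can bump safely (the CAS loop is copied locally so the example compiles on its own):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Same compare-and-swap loop as atomicMorphInt32 above.
func atomicMorphInt32(target *int32, morpher func(int32) (int32, interface{})) interface{} {
	for {
		currentVal := atomic.LoadInt32(target)
		desiredVal, morphResult := morpher(currentVal)
		if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
			return morphResult
		}
	}
}

func main() {
	var counter int32
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Saturating increment: never exceeds 64; the morph result reports
			// whether this particular call actually incremented.
			atomicMorphInt32(&counter, func(v int32) (int32, interface{}) {
				if v >= 64 {
					return v, false
				}
				return v + 1, true
			})
		}()
	}
	wg.Wait()
	fmt.Println(counter) // 64
}
```

+// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
+// The AtomicMorpher callback is passed a startValue and based on this value it returns
+// what the new value should be and the result that AtomicMorph should return to its caller.
+type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
+
+// AtomicMorph atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.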
+func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} { + for { + currentVal := atomic.LoadUint64(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) { + return morphResult + } + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go new file mode 100644 index 000000000..46091d645 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go @@ -0,0 +1,535 @@ +package azblob + +import ( + "context" + "encoding/base64" + "io" + "net/http" + + "bytes" + "os" + "sync" + "time" + + "errors" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// CommonResponse returns the headers common to all blob REST API responses. +type CommonResponse interface { + // ETag returns the value for header ETag. + ETag() ETag + + // LastModified returns the value for header Last-Modified. + LastModified() time.Time + + // RequestID returns the value for header x-ms-request-id. + RequestID() string + + // Date returns the value for header Date. + Date() time.Time + + // Version returns the value for header x-ms-version. + Version() string + + // Response returns the raw HTTP response object. + Response() *http.Response +} + +// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. +type UploadToBlockBlobOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress pipeline.ProgressReceiver + + // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. + BlobHTTPHeaders BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata Metadata + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions BlobAccessConditions + + // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) + Parallelism uint16 +} + +// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. +func UploadBufferToBlockBlob(ctx context.Context, b []byte, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + bufferSize := int64(len(b)) + if o.BlockSize == 0 { + // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error + if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { + return nil, errors.New("Buffer is too large to upload to a block blob") + } + // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if bufferSize <= BlockBlobMaxUploadBlobBytes { + o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = BlobDefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize). 
+ } + } + + if bufferSize <= BlockBlobMaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = bytes.NewReader(b) + if o.Progress != nil { + body = pipeline.NewRequestBodyProgress(body, o.Progress) + } + return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) + } + + var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1) + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := doBatchTransfer(ctx, batchTransferOptions{ + operationName: "UploadBufferToBlockBlob", + transferSize: bufferSize, + chunkSize: o.BlockSize, + parallelism: o.Parallelism, + operation: func(offset int64, count int64) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count]) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = pipeline.NewRequestBodyProgress(body, + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets a progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) + _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil) + return err + }, + }) + if err != nil { + return nil, err + } + // All put blocks were successful, call Put Block List to finalize the blob + return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) +} + +// UploadFileToBlockBlob uploads a file in blocks to a block blob. +func UploadFileToBlockBlob(ctx context.Context, file *os.File, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + + stat, err := file.Stat() + if err != nil { + return nil, err + } + m := mmf{} // Default to an empty slice; used for 0-size file + if stat.Size() != 0 { + m, err = newMMF(file, false, 0, int(stat.Size())) + if err != nil { + return nil, err + } + defer m.unmap() + } + return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o) +} + +/////////////////////////////////////////////////////////////////////////////// + +const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + +// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions. +type DownloadFromBlobOptions struct { + // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress pipeline.ProgressReceiver + + // AccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions BlobAccessConditions + + // Parallelism indicates the maximum number of blocks to download in parallel (0=default) + Parallelism uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. 
+ RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+ b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
+ if o.BlockSize == 0 {
+ o.BlockSize = BlobDefaultDownloadBlockSize
+ }
+
+ if count == CountToEnd { // If size not specified, calculate it
+ if initialDownloadResponse != nil {
+ count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
+ } else {
+ // If we don't have the length at all, get it
+ dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
+ if err != nil {
+ return err
+ }
+ count = dr.ContentLength() - offset
+ }
+ }
+
+ // Prepare and do parallel download.
+ progress := int64(0)
+ progressLock := &sync.Mutex{}
+
+ err := doBatchTransfer(ctx, batchTransferOptions{
+ operationName: "downloadBlobToBuffer",
+ transferSize: count,
+ chunkSize: o.BlockSize,
+ parallelism: o.Parallelism,
+ operation: func(chunkStart int64, count int64) error {
+ dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
+ if err != nil { // Check the download error before touching the response
+ return err
+ }
+ body := dr.Body(o.RetryReaderOptionsPerBlock)
+ if o.Progress != nil {
+ rangeProgress := int64(0)
+ body = pipeline.NewResponseBodyProgress(
+ body,
+ func(bytesTransferred int64) {
+ diff := bytesTransferred - rangeProgress
+ rangeProgress = bytesTransferred
+ progressLock.Lock()
+ progress += diff
+ o.Progress(progress)
+ progressLock.Unlock()
+ })
+ }
+ _, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
+ body.Close()
+ return err
+ },
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+ b []byte, o DownloadFromBlobOptions) error {
+ return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
+}
+
+// DownloadBlobToFile downloads an Azure blob to a local file.
+// The file is truncated or extended if its size doesn't match the download size.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+ file *os.File, o DownloadFromBlobOptions) error {
+ // 1. Calculate the size of the destination file
+ var size int64
+
+ if count == CountToEnd {
+ // Try to get Azure blob's size
+ props, err := blobURL.GetProperties(ctx, o.AccessConditions)
+ if err != nil {
+ return err
+ }
+ size = props.ContentLength() - offset
+ } else {
+ size = count
+ }
+
+ // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ if stat.Size() != size {
+ if err = file.Truncate(size); err != nil {
+ return err
+ }
+ }
+
+ if size > 0 {
+ // 3. Set mmap and call downloadBlobToBuffer.
+ m, err := newMMF(file, true, 0, int(size))
+ if err != nil {
+ return err
+ }
+ defer m.unmap()
+ return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
+ } else { // if the blob's size is 0, there is no need to download it
+ return nil
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// batchTransferOptions identifies options used by doBatchTransfer.
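+//
+// A hedged usage sketch (illustrative only; the values are hypothetical):
+//
+//	err := doBatchTransfer(ctx, batchTransferOptions{
+//		operationName: "exampleTransfer",
+//		transferSize:  10 * 1024 * 1024, // 10 MB total
+//		chunkSize:     4 * 1024 * 1024,  // 4 MB per chunk -> 3 chunks (4+4+2)
+//		parallelism:   4,
+//		operation: func(offset int64, chunkSize int64) error {
+//			// process bytes [offset, offset+chunkSize)
+//			return nil
+//		},
+//	})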
+type batchTransferOptions struct {
+ transferSize int64
+ chunkSize int64
+ parallelism uint16
+ operation func(offset int64, chunkSize int64) error
+ operationName string
+}
+
+// doBatchTransfer helps to execute operations in a batch manner.
+func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
+ if o.parallelism == 0 {
+ o.parallelism = 5 // default parallelism
+ }
+
+ // Prepare and do parallel operations.
+ numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1)
+ operationChannel := make(chan func() error, o.parallelism) // Buffered channel feeding the 'parallelism' worker goroutines
+ operationResponseChannel := make(chan error, numChunks) // Holds each operation's response
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Create the goroutines that process each operation (in parallel).
+ for g := uint16(0); g < o.parallelism; g++ {
+ go func() {
+ for f := range operationChannel {
+ err := f()
+ operationResponseChannel <- err
+ }
+ }()
+ }
+
+ // Add each chunk's operation to the channel.
+ for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+ curChunkSize := o.chunkSize
+
+ if chunkNum == numChunks-1 { // Last chunk
+ curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total
+ }
+ offset := int64(chunkNum) * o.chunkSize
+
+ operationChannel <- func() error {
+ return o.operation(offset, curChunkSize)
+ }
+ }
+ close(operationChannel)
+
+ // Wait for the operations to complete.
+ for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+ responseError := <-operationResponseChannel
+ if responseError != nil {
+ cancel() // As soon as any operation fails, cancel all remaining operation calls
+ return responseError // No need to process any more responses
+ }
+ }
+ return nil
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+type UploadStreamToBlockBlobOptions struct {
+ BufferSize int
+ MaxBuffers int
+ BlobHTTPHeaders BlobHTTPHeaders
+ Metadata Metadata
+ AccessConditions BlobAccessConditions
+}
+
+func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
+ o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
+ result, err := uploadStream(ctx, reader,
+ UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
+ &uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
+ if err != nil {
+ return nil, err
+ }
+ return result.(CommonResponse), nil
+}
+
+type uploadStreamToBlockBlobOptions struct {
+ b BlockBlobURL
+ o UploadStreamToBlockBlobOptions
+ blockIDPrefix uuid // UUID used with all blockIDs
+ maxBlockNum uint32 // defaults to 0
+ firstBlock []byte // Used only if maxBlockNum is 0
+}
+
+func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
+ return nil, nil
+}
+
+func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
+ if num == 0 {
+ t.firstBlock = buffer
+
+ // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
+ // If the payload is exactly the same size as the buffer, there may be more content coming in.
+ if len(buffer) < t.o.BufferSize {
+ return nil
+ }
+ }
+ // Else, upload a staged block...
+ atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
+ // Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
+ if startVal < num {
+ return num, nil
+ }
+ return startVal, nil
+ })
+ blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
+ _, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
+ return err
+}
+
+func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
+ // If the first block had the exact same size as the buffer,
+ // we would have staged it as a block thinking that there might be more data coming
+ if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
+ // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
+ return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
+ t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
+ }
+ // Multiple blocks staged, commit them all now
+ blockID := newUuidBlockID(t.blockIDPrefix)
+ blockIDs := make([]string, t.maxBlockNum+1)
+ for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
+ blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
+ }
+ return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+type iTransfer interface {
+ start(ctx context.Context) (interface{}, error)
+ chunk(ctx context.Context, num uint32, buffer []byte) error
+ end(ctx context.Context) (interface{}, error)
+}
+
+type UploadStreamOptions struct {
+ MaxBuffers int
+ BufferSize int
+}
+
+type firstErr struct {
+ lock sync.Mutex
+ finalError error
+}
+
+func (fe *firstErr) set(err error) {
+ fe.lock.Lock()
+ if fe.finalError == nil {
+ fe.finalError = err
+ }
+ fe.lock.Unlock()
+}
+
+func (fe *firstErr) get() (err error) {
+ fe.lock.Lock()
+ err = fe.finalError
+ fe.lock.Unlock()
+ return
+}
+
+func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
+ firstErr := firstErr{}
+ ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
+ defer cancel()
+ wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
+ type OutgoingMsg struct {
+ chunkNum uint32
+ buffer []byte
+ }
+
+ // Create a channel to hold the buffers usable for incoming data
+ incoming := make(chan []byte, o.MaxBuffers)
+ outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
+ if result, err := t.start(ctx); err != nil {
+ return result, err
+ }
+
+ numBuffers := 0 // The number of buffers & outgoing goroutines created so far
+ injectBuffer := func() {
+ // For each buffer, create it and a goroutine to upload it
+ incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so the reading loop can fill it from the reader
+ numBuffers++
+ go func() {
+ for outgoingMsg := range outgoing {
+ // Upload the outgoing buffer
+ err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
+ wg.Done() // Indicate this buffer was sent
+ if err != nil {
+ // NOTE: set() may be called from multiple goroutines; only the
+ // first error is recorded, and some error will be returned.
+ firstErr.set(err)
+ cancel()
+ }
+ incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
+ }
+ }()
+ }
+ injectBuffer() // Create our 1st buffer & outgoing goroutine
+
+ // This goroutine grabs a buffer, reads from the stream into the buffer,
+ // and inserts the buffer into the outgoing channel to be uploaded
+ for c := uint32(0); true; c++ { // Iterate once per chunk
+ var buffer []byte
+ if numBuffers < o.MaxBuffers {
+ select {
+ // We're not at max buffers, see if a previously-created buffer is available
+ case buffer = <-incoming:
+ // Reuse the available buffer
+ default:
+ // No buffer available; inject a new buffer & goroutine to process it
+ injectBuffer()
+ buffer = <-incoming // Grab the just-injected buffer
+ }
+ } else {
+ // We are at max buffers, block until we get to reuse one
+ buffer = <-incoming
+ }
+ n, err := io.ReadFull(reader, buffer)
+ if err != nil { // Fewer than len(buffer) bytes were read
+ buffer = buffer[:n] // Make slice match the # of read bytes
+ }
+ if len(buffer) > 0 {
+ // Buffer not empty, upload it
+ wg.Add(1) // We're posting a buffer to be sent
+ outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
+ }
+ if err != nil { // The reader is done, no more outgoing buffers
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
+ } else {
+ firstErr.set(err)
+ }
+ break
+ }
+ }
+ // NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
+ close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
+ wg.Wait() // Wait for all pending outgoing messages to complete
+ err := firstErr.get()
+ if err == nil {
+ // If no error, after all blocks uploaded, commit them to the blob & return the result
+ return t.end(ctx)
+ }
+ return nil, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
new file mode 100644
index 000000000..0647f2385
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
@@ -0,0 +1,148 @@
+package azblob
+
+import (
+ "net"
+ "net/url"
+ "strings"
+)
+
+const (
+ snapshot = "snapshot"
+ SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type BlobURLParts struct {
+ Scheme string // Ex: "https"
+ Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+ IPEndpointStyleInfo IPEndpointStyleInfo
+ ContainerName string // "" if no container
+ BlobName string // "" if no blob
+ Snapshot string // "" if not a snapshot
+ SAS SASQueryParameters
+ UnparsedParams string
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URLs, e.g. when working with the Azure storage emulator.
+// Ex: "https://10.132.141.33/accountname/containername"
+type IPEndpointStyleInfo struct {
+ AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks whether the URL's host is an IP address; in this case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/container/...
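+// e.g. (illustrative): http://127.0.0.1:10000/devstoreaccount1/mycontainer/myblob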
+// As with url.URL's Host field, host may be either "host" or "host:port".
+func isIPEndpointStyle(host string) bool {
+ if host == "" {
+ return false
+ }
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+ // For IPv6, SplitHostPort can fail because no port is present.
+ // In this case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
+ if host[0] == '[' && host[len(host)-1] == ']' {
+ host = host[1 : len(host)-1]
+ }
+ return net.ParseIP(host) != nil
+}
+
+// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
+func NewBlobURLParts(u url.URL) BlobURLParts {
+ up := BlobURLParts{
+ Scheme: u.Scheme,
+ Host: u.Host,
+ }
+
+ // Find the container & blob names (if any)
+ if u.Path != "" {
+ path := u.Path
+ if path[0] == '/' {
+ path = path[1:] // If path starts with a slash, remove it
+ }
+ if isIPEndpointStyle(up.Host) {
+ if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
+ up.IPEndpointStyleInfo.AccountName = path
+ } else {
+ up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
+ path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
+ }
+ }
+
+ containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
+ if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
+ up.ContainerName = path
+ } else {
+ up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
+ up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash
+ }
+ }
+
+ // Extract the query parameters; the snapshot lookup below is case-insensitive
+ paramsMap := u.Query()
+
+ up.Snapshot = "" // Assume no snapshot
+ if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
+ up.Snapshot = snapshotStr[0]
+ // If we recognized the query parameter, remove it from the map
+ delete(paramsMap, snapshot)
+ }
+ up.SAS = newSASQueryParameters(paramsMap, true)
+ up.UnparsedParams = paramsMap.Encode()
+ return up
+}
+
+type caseInsensitiveValues url.Values // map[string][]string
+
+func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
+ key = strings.ToLower(key)
+ for k, v := range values {
+ if strings.ToLower(k) == key {
+ return v, true
+ }
+ }
+ return []string{}, false
+}
+
+// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
+// field contains the SAS, snapshot, and unparsed query parameters.
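+//
+// A round-trip sketch (illustrative only; the URL is hypothetical):
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=2018-03-01T00:00:00.0000000Z")
+//	parts := NewBlobURLParts(*u)
+//	parts.Snapshot = "" // point at the base blob rather than the snapshot
+//	base := parts.URL() // rebuilds https://myaccount.blob.core.windows.net/mycontainer/myblob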
+func (up BlobURLParts) URL() url.URL {
+ path := ""
+ if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
+ path += "/" + up.IPEndpointStyleInfo.AccountName
+ }
+ // Concatenate container & blob names (if they exist)
+ if up.ContainerName != "" {
+ path += "/" + up.ContainerName
+ if up.BlobName != "" {
+ path += "/" + up.BlobName
+ }
+ }
+
+ rawQuery := up.UnparsedParams
+
+ // Concatenate blob snapshot query parameter (if it exists)
+ if up.Snapshot != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += snapshot + "=" + up.Snapshot
+ }
+ sas := up.SAS.Encode()
+ if sas != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += sas
+ }
+ u := url.URL{
+ Scheme: up.Scheme,
+ Host: up.Host,
+ Path: path,
+ RawQuery: rawQuery,
+ }
+ return u
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
new file mode 100644
index 000000000..67f0c9f84
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -0,0 +1,203 @@
+package azblob
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+type BlobSASSignatureValues struct {
+ Version string `param:"sv"` // If not specified, this defaults to SASVersion
+ Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
+ StartTime time.Time `param:"st"` // Not specified if IsZero
+ ExpiryTime time.Time `param:"se"` // Not specified if IsZero
+ Permissions string `param:"sp"` // Create by initializing ContainerSASPermissions or BlobSASPermissions and calling String()
+ IPRange IPRange `param:"sip"`
+ Identifier string `param:"si"`
+ ContainerName string
+ BlobName string // Use "" to create a Container SAS
+ CacheControl string // rscc
+ ContentDisposition string // rscd
+ ContentEncoding string // rsce
+ ContentLanguage string // rscl
+ ContentType string // rsct
+}
+
+// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
+// the proper SAS query parameters.
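+//
+// A hedged sketch (illustrative only; `credential` is a *SharedKeyCredential obtained elsewhere):
+//
+//	sasQP, err := BlobSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
+//		ContainerName: "mycontainer",
+//		BlobName:      "myblob",
+//		Permissions:   BlobSASPermissions{Read: true}.String(),
+//	}.NewSASQueryParameters(credential)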
+func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { + resource := "c" + if v.BlobName == "" { + // Make sure the permission characters are in the correct order + perms := &ContainerSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } else { + resource = "b" + // Make sure the permission characters are in the correct order + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { + v.Version = SASVersion + } + startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName), + v.Identifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + + // Calculated SAS signature + signature: signature, + } + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } + return strings.Join(elements, "") +} + +// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type ContainerSASPermissions struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p ContainerSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions's fields from a string. 
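+// e.g. (illustrative): Parse("rl") sets Read and List, and
+// ContainerSASPermissions{Read: true, List: true}.String() returns "rl".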
+func (p *ContainerSASPermissions) Parse(s string) error { + *p = ContainerSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("Invalid permission: '%v'", r) + } + } + return nil +} + +// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool } + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p BlobSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. +func (p *BlobSASPermissions) Parse(s string) error { + *p = BlobSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + default: + return fmt.Errorf("Invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go new file mode 100644 index 000000000..d260f8aee --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go @@ -0,0 +1,195 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes + +// ServiceCode values indicate a service failure. +const ( + // ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met. + ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" + + // ServiceCodeBlobAlreadyExists means the specified blob already exists. + ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" + + // ServiceCodeBlobNotFound means the specified blob does not exist. + ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" + + // ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken. + ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" + + // ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length. + ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" + + // ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks + // or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks. + ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" + + // ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks. + ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" + + // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. 
+ ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" + + // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. + // Examine the HTTP status code and message for more information about the failure. + ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" + + // ServiceCodeContainerAlreadyExists means the specified container already exists. + ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" + + // ServiceCodeContainerBeingDeleted means the specified container is being deleted. + ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" + + // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. + ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" + + // ServiceCodeContainerNotFound means the specified container does not exist. + ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" + + // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. + ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" + + // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. + ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" + + // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. + ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" + + // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or + // that the operation for AppendBlob requires at least version 2015-02-21. + ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" + + // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. + ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" + + // ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. + ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + + // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. + ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" + + // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. + ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" + + // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. + ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" + + // ServiceCodeInvalidBlobType means the blob type is invalid for this operation. + ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" + + // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. + ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" + + // ServiceCodeInvalidBlockList means the specified block list is invalid. 
+ ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" + + // ServiceCodeInvalidOperation means an invalid operation against a blob snapshot. + ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" + + // ServiceCodeInvalidPageRange means the page range specified is invalid. + ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" + + // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. + ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" + + // ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL. + ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" + + // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. + ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" + + // ServiceCodeLeaseAlreadyPresent means there is already a lease present. + ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" + + // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. + ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" + + // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. + ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" + + // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. + ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" + + // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. + ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" + + // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. + ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" + + // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. + ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" + + // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. + ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" + + // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. + ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" + + // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. + ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" + + // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. + ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" + + // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. 
+ ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" + + // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. + ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" + + // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. + ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" + + // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. + ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" + + // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. + ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + + // ServiceCodePendingCopyOperation means there is currently a pending copy operation. + ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" + + // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value. + ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" + + // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. + ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" + + // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. + ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" + + // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. + ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" + + // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. + ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" + + // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. + ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" + + // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. + ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" + + // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. + ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" + + // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. + ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" + + // ServiceCodeSystemInUse means this blob is in use by the system. + ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" + + // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. + ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" + + // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. 
+ ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" + + // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. + ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" + + // ServiceCodeBlobArchived means this operation is not permitted on an archived blob. + ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" + + // ServiceCodeBlobNotArchived means this blob is currently not in the archived state. + ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go new file mode 100644 index 000000000..8366e9631 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go @@ -0,0 +1,116 @@ +package azblob + +import ( + "context" + "io" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock. + AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB + + // AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob. + AppendBlobMaxBlocks = 50000 +) + +// AppendBlobURL defines a set of operations applicable to append blobs. +type AppendBlobURL struct { + BlobURL + abClient appendBlobClient +} + +// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline. +func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { + blobClient := newBlobClient(url, p) + abClient := newAppendBlobClient(url, p) + return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} +} + +// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline. +func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { + return NewAppendBlobURL(ab.blobClient.URL(), p) +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL { + p := NewBlobURLParts(ab.URL()) + p.Snapshot = snapshot + return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) +} + +// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() + return ab.abClient.Create(ctx, 0, nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil) +} + +// AppendBlock writes a stream to a new block of data to the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. 
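+//
+// A hedged usage sketch (illustrative only; u, p, ctx, and line are hypothetical):
+//
+//	abURL := NewAppendBlobURL(u, p)
+//	_, err := abURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
+//	if err == nil {
+//		_, err = abURL.AppendBlock(ctx, bytes.NewReader(line), AppendBlobAccessConditions{}, nil)
+//	}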
+func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
+ count, err := validateSeekableStreamAt0AndGetCount(body)
+ if err != nil {
+ return nil, err
+ }
+ return ab.abClient.AppendBlock(ctx, body, count, nil,
+ transactionalMD5, ac.LeaseAccessConditions.pointers(),
+ ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+type AppendBlobAccessConditions struct {
+ ModifiedAccessConditions
+ LeaseAccessConditions
+ AppendPositionAccessConditions
+}
+
+// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
+type AppendPositionAccessConditions struct {
+ // IfAppendPositionEqual ensures that the AppendBlock operation succeeds
+ // only if the append position is equal to a value.
+ // IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
+ // IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value.
+ // IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0.
+ IfAppendPositionEqual int64
+
+ // IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
+ // only if the append blob's size is less than or equal to a value.
+ // IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
+ // IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value.
+ // IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0.
+ IfMaxSizeLessThanOrEqual int64
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
+ var zero int64 // defaults to 0
+ switch ac.IfAppendPositionEqual {
+ case -1:
+ iape = &zero
+ case 0:
+ iape = nil
+ default:
+ iape = &ac.IfAppendPositionEqual
+ }
+
+ switch ac.IfMaxSizeLessThanOrEqual {
+ case -1:
+ imsltoe = &zero
+ case 0:
+ imsltoe = nil
+ default:
+ imsltoe = &ac.IfMaxSizeLessThanOrEqual
+ }
+ return
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
new file mode 100644
index 000000000..41d13402c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
@@ -0,0 +1,216 @@
+package azblob
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
+type BlobURL struct {
+ blobClient blobClient
+}
+
+// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
+func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
+ blobClient := newBlobClient(url, p)
+ return BlobURL{blobClient: blobClient}
+}
+
+// URL returns the URL endpoint used by the BlobURL object.
+func (b BlobURL) URL() url.URL {
+ return b.blobClient.URL()
+}
+
+// String returns the URL as a string.
+func (b BlobURL) String() string { + u := b.URL() + return u.String() +} + +// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline. +func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { + return NewBlobURL(b.blobClient.URL(), p) +} + +// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b BlobURL) WithSnapshot(snapshot string) BlobURL { + p := NewBlobURLParts(b.URL()) + p.Snapshot = snapshot + return NewBlobURL(p.URL(), b.blobClient.Pipeline()) +} + +// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline. +func (b BlobURL) ToAppendBlobURL() AppendBlobURL { + return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline. +func (b BlobURL) ToBlockBlobURL() BlockBlobURL { + return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline. +func (b BlobURL) ToPageBlobURL() PageBlobURL { + return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) { + var xRangeGetContentMD5 *bool + if rangeGetContentMD5 { + xRangeGetContentMD5 = &rangeGetContentMD5 + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + dr, err := b.blobClient.Download(ctx, nil, nil, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + if err != nil { + return nil, err + } + return &DownloadResponse{ + b: b, + r: dr, + ctx: ctx, + getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, + }, err +} + +// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) { + return b.blobClient.Undelete(ctx, nil, nil) +} + +// SetTier operation sets the tier on a blob. 
The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) { + return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers()) +} + +// GetBlobProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// SetBlobHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return b.blobClient.SetHTTPHeaders(ctx, nil, + &h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage, + ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + &h.ContentDisposition, nil) +} + +// SetBlobMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this + // performance hit. + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil) +} + +// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between +// 15 to 60 seconds, or infinite (-1). 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+ return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// RenewLease renews the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+ return b.blobClient.RenewLease(ctx, leaseID, nil,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// ReleaseLease releases the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+ return b.blobClient.ReleaseLease(ctx, leaseID, nil,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakNaturally (-1)
+// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+ return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// ChangeLease changes the blob's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+ return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
+ nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
+const LeaseBreakNaturally = -1
+
+func leasePeriodPointer(period int32) (p *int32) {
+ if period != LeaseBreakNaturally {
+ p = &period
+ }
+ return p // p is nil when breaking naturally
+}
+
+// StartCopyFromURL copies the data at the source URL to a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
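+//
+// A hedged sketch (illustrative only; dstBlobURL and srcURL are hypothetical):
+//
+//	resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
+//	// The copy may complete asynchronously; poll with GetProperties until the copy status is no longer pending.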
+func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) { + srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() + dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() + dstLeaseID := dstac.LeaseAccessConditions.pointers() + + return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata, + srcIfModifiedSince, srcIfUnmodifiedSince, + srcIfMatchETag, srcIfNoneMatchETag, + dstIfModifiedSince, dstIfUnmodifiedSince, + dstIfMatchETag, dstIfNoneMatchETag, + dstLeaseID, nil) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) { + return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go new file mode 100644 index 000000000..e1839ee46 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go @@ -0,0 +1,161 @@ +package azblob + +import ( + "context" + "io" + "net/url" + + "encoding/base64" + "encoding/binary" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB + + // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. + BlockBlobMaxBlocks = 50000 +) + +// BlockBlobURL defines a set of operations applicable to block blobs. +type BlockBlobURL struct { + BlobURL + bbClient blockBlobClient +} + +// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline. +func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL { + blobClient := newBlobClient(url, p) + bbClient := newBlockBlobClient(url, p) + return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient} +} + +// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline. +func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL { + return NewBlockBlobURL(bb.blobClient.URL(), p) +} + +// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL { + p := NewBlobURLParts(bb.URL()) + p.Snapshot = snapshot + return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. 
To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return nil, err
+	}
+	return bb.bbClient.Upload(ctx, body, count, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
+		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil)
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return nil, err
+	}
+	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from the specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
+func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
+	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
+}
+
+// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
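+//
+// A hypothetical sketch of the staged-upload flow (the chunking scheme and
+// variable names are illustrative assumptions):
+//
+//	var base64IDs []string
+//	for i, chunk := range chunks { // chunks is a [][]byte, each element <= BlockBlobMaxStageBlockBytes
+//		id := BlockID{} // all IDs must have the same length, so always encode the full 64 bytes
+//		binary.BigEndian.PutUint32(id[:4], uint32(i))
+//		b64 := id.ToBase64()
+//		if _, err := bb.StageBlock(ctx, b64, bytes.NewReader(chunk), LeaseAccessConditions{}, nil); err != nil {
+//			return err
+//		}
+//		base64IDs = append(base64IDs, b64)
+//	}
+//	_, err := bb.CommitBlockList(ctx, base64IDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})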
+func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
+	metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
+		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
+func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
+	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// BlockID is a 64-byte value that identifies a block; it is encoded to/decoded from the
+// base-64 strings that the service requires for block IDs.
+type BlockID [64]byte
+
+// ToBase64 encodes the block ID as a base-64 string.
+func (blockID BlockID) ToBase64() string {
+	return base64.StdEncoding.EncodeToString(blockID[:])
+}
+
+// FromBase64 decodes the specified base-64 string into the block ID.
+func (blockID *BlockID) FromBase64(s string) error {
+	*blockID = BlockID{} // Zero out the block ID
+	_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
+	return err
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// uuidBlockID is a BlockID holding a UUID followed by a big-endian block number.
+type uuidBlockID BlockID

+func (ubi uuidBlockID) UUID() uuid {
+	u := uuid{}
+	copy(u[:], ubi[:len(u)])
+	return u
+}
+
+func (ubi uuidBlockID) Number() uint32 {
+	return binary.BigEndian.Uint32(ubi[len(uuid{}):])
+}
+
+func newUuidBlockID(u uuid) uuidBlockID {
+	ubi := uuidBlockID{}     // Create a new uuidBlockID
+	copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
+	// Block number defaults to 0
+	return ubi
+}
+
+func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
+	copy(ubi[:len(u)], u[:])
+	return ubi
+}
+
+func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
+	binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
+	return ubi                                                 // Return the passed-in copy
+}
+
+func (ubi uuidBlockID) ToBase64() string {
+	return BlockID(ubi).ToBase64()
+}
+
+func (ubi *uuidBlockID) FromBase64(s string) error {
+	return (*BlockID)(ubi).FromBase64(s)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
new file mode 100644
index 000000000..20806f361
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
@@ -0,0 +1,295 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
+type ContainerURL struct {
+	client containerClient
+}
+
+// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
+func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
+	client := newContainerClient(url, p)
+	return ContainerURL{client: client}
+}
+
+// URL returns the URL endpoint used by the ContainerURL object.
+func (c ContainerURL) URL() url.URL { + return c.client.URL() +} + +// String returns the URL as a string. +func (c ContainerURL) String() string { + u := c.URL() + return u.String() +} + +// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline. +func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { + return NewContainerURL(c.URL(), p) +} + +// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's +// NewBlobURL method. +func (c ContainerURL) NewBlobURL(blobName string) BlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlobURL(blobURL, c.client.Pipeline()) +} + +// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's +// NewAppendBlobURL method. +func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewAppendBlobURL(blobURL, c.client.Pipeline()) +} + +// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's +// NewBlockBlobURL method. +func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlockBlobURL(blobURL, c.client.Pipeline()) +} + +// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's +// NewPageBlobURL method. +func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewPageBlobURL(blobURL, c.client.Pipeline()) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. +func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { + return c.client.Create(ctx, nil, metadata, publicAccessType, nil) +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. 
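+//
+// A hypothetical sketch of creating and cleaning up a container (cURL and the
+// choice of PublicAccessNone are illustrative assumptions):
+//
+//	if _, err := cURL.Create(ctx, Metadata{}, PublicAccessNone); err != nil {
+//		return err
+//	}
+//	defer cURL.Delete(ctx, ContainerAccessConditions{}) // clean up when done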
+func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
+	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
+	}
+
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// GetProperties returns the container's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
+func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
+	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
+	// This allows us to not expose a separate GetMetadata method at all, simplifying the API.
+	return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
+}
+
+// SetMetadata sets the container's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
+func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
+	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch access conditions must have their default values because they are ignored by the blob service")
+	}
+	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
+}
+
+// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
+func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
+	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
+}
+
+// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
+// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
+type AccessPolicyPermission struct {
+	Read, Add, Create, Write, Delete, List bool
+}
+
+// String produces the access policy permission string for an Azure Storage container.
+// Call this method to set AccessPolicy's Permission field.
+func (p AccessPolicyPermission) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Create {
+		b.WriteRune('c')
+	}
+	if p.Write {
+		b.WriteRune('w')
+	}
+	if p.Delete {
+		b.WriteRune('d')
+	}
+	if p.List {
+		b.WriteRune('l')
+	}
+	return b.String()
+}
+
+// Parse initializes the AccessPolicyPermission's fields from a string.
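+//
+// For example, the flags and the permission string round-trip (illustrative):
+//
+//	perm := AccessPolicyPermission{Read: true, List: true}
+//	s := perm.String() // "rl"
+//	var parsed AccessPolicyPermission
+//	_ = parsed.Parse(s) // parsed now equals perm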
+func (p *AccessPolicyPermission) Parse(s string) error {
+	*p = AccessPolicyPermission{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		default:
+			return fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
+func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
+	ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
+	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
+	}
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
+		accessType, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
+		ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// RenewLease renews the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ReleaseLease releases the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// BreakLease breaks the container's previously-acquired lease (if it exists).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ChangeLease changes the container's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
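+//
+// A hypothetical lease life cycle (the proposed lease GUID and the response's
+// LeaseID() accessor are assumptions for illustration):
+//
+//	proposedID := "ca761232-ed42-11ce-bacd-00aa0057b223"
+//	resp, err := cURL.AcquireLease(ctx, proposedID, 15, ModifiedAccessConditions{})
+//	if err == nil {
+//		_, _ = cURL.RenewLease(ctx, resp.LeaseID(), ModifiedAccessConditions{})
+//		_, _ = cURL.ReleaseLease(ctx, resp.LeaseID(), ModifiedAccessConditions{})
+//	}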
+func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
+}
+
+// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
+	if o.Details.Snapshots {
+		return nil, errors.New("snapshots are not supported in this listing operation")
+	}
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
+}
+
+// ListBlobsSegmentOptions defines options available when calling ListBlobs.
+type ListBlobsSegmentOptions struct {
+	Details BlobListingDetails // No IncludeType header is produced if ""
+	Prefix  string             // No Prefix header is produced if ""
+
+	// MaxResults sets the maximum desired results you want the service to return. Note, the
+	// service may return fewer results than requested.
+	// MaxResults=0 means no 'MaxResults' header specified.
+	MaxResults int32
+}
+
+func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
+	if o.Prefix != "" {
+		prefix = &o.Prefix
+	}
+	include = o.Details.slice()
+	if o.MaxResults != 0 {
+		maxResults = &o.MaxResults
+	}
+	return
+}
+
+// BlobListingDetails indicates what additional information the service should return with each blob.
+type BlobListingDetails struct {
+	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
+}
+
+// slice produces the Include query parameter's value.
+func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
+	items := []ListBlobsIncludeItemType{}
+	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+	if d.Copy {
+		items = append(items, ListBlobsIncludeItemCopy)
+	}
+	if d.Deleted {
+		items = append(items, ListBlobsIncludeItemDeleted)
+	}
+	if d.Metadata {
+		items = append(items, ListBlobsIncludeItemMetadata)
+	}
+	if d.Snapshots {
+		items = append(items, ListBlobsIncludeItemSnapshots)
+	}
+	if d.UncommittedBlobs {
+		items = append(items, ListBlobsIncludeItemUncommittedblobs)
+	}
+	return items
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
new file mode 100644
index 000000000..953f3be6d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
@@ -0,0 +1,208 @@
+package azblob
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+	"strconv"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+	// PageBlobPageBytes indicates the number of bytes in a page (512).
+	PageBlobPageBytes = 512
+
+	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
+	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
+)
+
+// PageBlobURL defines a set of operations applicable to page blobs.
+type PageBlobURL struct {
+	BlobURL
+	pbClient pageBlobClient
+}
+
+// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
+func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
+	blobClient := newBlobClient(url, p)
+	pbClient := newPageBlobClient(url, p)
+	return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
+}
+
+// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
+func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
+	return NewPageBlobURL(pb.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
+	p := NewBlobURLParts(pb.URL())
+	p.Snapshot = snapshot
+	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
+}
+
+// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.Create(ctx, 0, size, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
+		metadata, ac.LeaseAccessConditions.pointers(),
+		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
+}
+
+// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
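+//
+// A hypothetical sketch (the payload and its 512-byte alignment are the
+// caller's responsibility; names are illustrative):
+//
+//	page := make([]byte, PageBlobPageBytes) // length must be a multiple of 512
+//	copy(page, []byte("hello"))
+//	_, err := pb.UploadPages(ctx, 0, bytes.NewReader(page), PageBlobAccessConditions{}, nil)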
+func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + if err != nil { + return nil, err + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() + return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() + return pb.pbClient.ClearPages(ctx, 0, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, + ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.GetPageRanges(ctx, nil, nil, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
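+//
+// For example, growing a blob to four pages (sizes are illustrative):
+//
+//	_, err := pb.Resize(ctx, 4*PageBlobPageBytes, BlobAccessConditions{})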
+func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+// UpdateSequenceNumber updates the page blob's sequence number.
+func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
+	ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
+	sn := &sequenceNumber
+	if action == SequenceNumberActionIncrement {
+		sn = nil
+	}
+	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
+		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+		sn, nil)
+}
+
+// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
+// The snapshot is copied such that only the differential changes between the previously copied snapshot and the new snapshot are transferred to the destination.
+// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
+// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
+func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	qp := source.Query()
+	qp.Set("snapshot", snapshot)
+	source.RawQuery = qp.Encode()
+	return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+}
+
+func (pr PageRange) pointers() *string {
+	endOffset := strconv.FormatInt(int64(pr.End), 10)
+	asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
+	return &asString
+}
+
+// PageBlobAccessConditions identifies the access conditions you can optionally set on page blob operations.
+type PageBlobAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+	SequenceNumberAccessConditions
+}
+
+// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
+type SequenceNumberAccessConditions struct {
+	// IfSequenceNumberLessThan ensures that the page blob operation succeeds
+	// only if the blob's sequence number is less than a value.
+	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
+	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value.
+	// IfSequenceNumberLessThan=-1 means 'IfSequenceNumberLessThan' header specified with a value of 0.
+	IfSequenceNumberLessThan int64
+
+	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
+	// only if the blob's sequence number is less than or equal to a value.
+	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
+ // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value + // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0 + IfSequenceNumberLessThanOrEqual int64 + + // IfSequenceNumberEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is equal to a value. + // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified. + // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value + // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0 + IfSequenceNumberEqual int64 +} + +// pointers is for internal infrastructure. It returns the fields as pointers. +func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { + var zero int64 // Defaults to 0 + switch ac.IfSequenceNumberLessThan { + case -1: + snlt = &zero + case 0: + snlt = nil + default: + snlt = &ac.IfSequenceNumberLessThan + } + + switch ac.IfSequenceNumberLessThanOrEqual { + case -1: + snltoe = &zero + case 0: + snltoe = nil + default: + snltoe = &ac.IfSequenceNumberLessThanOrEqual + } + switch ac.IfSequenceNumberEqual { + case -1: + sne = &zero + case 0: + sne = nil + default: + sne = &ac.IfSequenceNumberEqual + } + return +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go new file mode 100644 index 000000000..06d96fde1 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go @@ -0,0 +1,134 @@ +package azblob + +import ( + "context" + "net/url" + "strings" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers. +type ServiceURL struct { + client serviceClient +} + +// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. +func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { + client := newServiceClient(primaryURL, p) + return ServiceURL{client: client} +} + +// URL returns the URL endpoint used by the ServiceURL object. +func (s ServiceURL) URL() url.URL { + return s.client.URL() +} + +// String returns the URL as a string. +func (s ServiceURL) String() string { + u := s.URL() + return u.String() +} + +// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. +func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { + return NewServiceURL(s.URL(), p) +} + +// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of +// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL. +// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's +// NewContainerURL method. 
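+//
+// A hypothetical sketch of the usual drill-down (account and names are assumptions):
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
+//	service := NewServiceURL(*u, p) // p is a pipeline.Pipeline built elsewhere
+//	container := service.NewContainerURL("mycontainer")
+//	blob := container.NewBlockBlobURL("myblob.txt")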
+func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
+	containerURL := appendToURLPath(s.URL(), containerName)
+	return NewContainerURL(containerURL, s.client.Pipeline())
+}
+
+// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
+func appendToURLPath(u url.URL, name string) url.URL {
+	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
+	// When you call url.Parse() this is what you'll get:
+	//	Scheme: "https"
+	//	Opaque: ""
+	//	User: nil
+	//	Host: "ms.com"
+	//	Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
+	//	RawPath: ""
+	//	ForceQuery: false
+	//	RawQuery: "k1=v1&k2=v2"
+	//	Fragment: "f"
+	if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
+		u.Path += "/" // Append "/" to end before appending name
+	}
+	u.Path += name
+	return u
+}
+
+// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListContainersSegment again (passing the
+// previously-returned Marker) to get the next segment. For more information, see
+// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
+}
+
+// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
+type ListContainersSegmentOptions struct {
+	Detail     ListContainersDetail // No IncludeType header is produced if ""
+	Prefix     string               // No Prefix header is produced if ""
+	MaxResults int32                // 0 means unspecified
+	// TODO: update swagger to generate this type?
+}
+
+func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
+	if o.Prefix != "" {
+		prefix = &o.Prefix
+	}
+	if o.MaxResults != 0 {
+		maxResults = &o.MaxResults
+	}
+	include = ListContainersIncludeType(o.Detail.string())
+	return
+}
+
+// ListContainersDetail indicates what additional information the service should return with each container.
+type ListContainersDetail struct {
+	// Tells the service whether to return metadata for each container.
+	Metadata bool
+}
+
+// string produces the Include query parameter's value.
+func (d *ListContainersDetail) string() string {
+	items := make([]string, 0, 1)
+	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+	if d.Metadata {
+		items = append(items, string(ListContainersIncludeMetadata))
+	}
+	if len(items) > 0 {
+		return strings.Join(items, ",")
+	}
+	return string(ListContainersIncludeNone)
+}
+
+// GetProperties gets the properties of a storage account's Blob service endpoint.
+func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
+	return bsu.client.GetProperties(ctx, nil, nil)
+}
+
+// SetProperties sets the properties of a storage account's Blob service endpoint.
+func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
+	return bsu.client.SetProperties(ctx, properties, nil, nil)
+}
+
+// GetStatistics retrieves statistics related to replication for the Blob service.
+func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
+	return bsu.client.GetStatistics(ctx, nil, nil)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
new file mode 100644
index 000000000..003fece5d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
@@ -0,0 +1,3 @@
+package azblob
+
+const serviceLibVersion = "0.3"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
new file mode 100644
index 000000000..a81987d54
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
@@ -0,0 +1,55 @@
+package azblob
+
+import (
+	"context"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// Credential represents any credential type; it is used to create a credential policy Factory.
+type Credential interface {
+	pipeline.Factory
+	credentialMarker()
+}
+
+type credentialFunc pipeline.FactoryFunc
+
+func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f(next, po)
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (credentialFunc) credentialMarker() {}
+
+//////////////////////////////
+
+// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
+// or for use with Shared Access Signatures (SAS).
+func NewAnonymousCredential() Credential {
+	return anonymousCredentialFactory
+}
+
+var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
+
+// anonymousCredentialPolicyFactory is the credential's policy factory.
+type anonymousCredentialPolicyFactory struct {
+}
+
+// New creates a credential policy object.
+func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return &anonymousCredentialPolicy{next: next}
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*anonymousCredentialPolicyFactory) credentialMarker() {}
+
+// anonymousCredentialPolicy is the credential's policy object.
+type anonymousCredentialPolicy struct {
+	next pipeline.Policy
+}
+
+// Do implements the credential's policy interface.
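+//
+// A hypothetical sketch of anonymous (public) access; the URL is an assumption:
+//
+//	p := NewPipeline(NewAnonymousCredential(), PipelineOptions{})
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/public-container")
+//	container := NewContainerURL(*u, p)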
+func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+	// For anonymous credentials, this is effectively a no-op
+	return p.next.Do(ctx, request)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
new file mode 100644
index 000000000..7a63916b7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
@@ -0,0 +1,196 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	bytes, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return &SharedKeyCredential{}, err
+	}
+	return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
+}
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+// It is immutable, making it shareable and goroutine-safe.
+type SharedKeyCredential struct {
+	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
+	accountName string
+	accountKey  []byte
+}
+
+// AccountName returns the Storage account's name.
+func (f SharedKeyCredential) AccountName() string {
+	return f.accountName
+}
+
+// New creates a credential policy object.
+func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		// Add an x-ms-date header if it doesn't already exist
+		if d := request.Header.Get(headerXmsDate); d == "" {
+			request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
+		}
+		stringToSign, err := f.buildStringToSign(request)
+		if err != nil {
+			return nil, err
+		}
+		signature := f.ComputeHMACSHA256(stringToSign)
+		authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
+		request.Header[headerAuthorization] = []string{authHeader}
+
+		response, err := next.Do(ctx, request)
+		if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
+			// Service failed to authenticate request, log it
+			po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
+		}
+		return response, err
+	})
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*SharedKeyCredential) credentialMarker() {}
+
+// Constants ensuring that header names are correctly spelled and consistently cased.
+const ( + headerAuthorization = "Authorization" + headerCacheControl = "Cache-Control" + headerContentEncoding = "Content-Encoding" + headerContentDisposition = "Content-Disposition" + headerContentLanguage = "Content-Language" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentType = "Content-Type" + headerDate = "Date" + headerIfMatch = "If-Match" + headerIfModifiedSince = "If-Modified-Since" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerUserAgent = "User-Agent" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" +) + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { + h := hmac.New(sha256.New, f.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := request.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + request.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return string(ch.Bytes()) +} + +func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(f.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+		cr.WriteString(u.EscapedPath())
+	} else {
+		// a slash is required to indicate the root path
+		cr.WriteString("/")
+	}
+
+	// params is a map[string][]string; param name is key; params values is []string
+	params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
+	if err != nil {
+		return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
+	}
+
+	if len(params) > 0 { // There is at least 1 query parameter
+		paramNames := []string{} // We use this to sort the parameter key names
+		for paramName := range params {
+			paramNames = append(paramNames, paramName) // paramNames must be lowercase
+		}
+		sort.Strings(paramNames)
+
+		for _, paramName := range paramNames {
+			paramValues := params[paramName]
+			sort.Strings(paramValues)
+
+			// Join the sorted key values separated by ','
+			// Then prepend "keyName:"; then add this string to the buffer
+			cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
+		}
+	}
+	return string(cr.Bytes()), nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go
new file mode 100644
index 000000000..7e78d25f1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go
@@ -0,0 +1,137 @@
+package azblob
+
+import (
+	"context"
+	"errors"
+	"sync/atomic"
+
+	"runtime"
+	"sync"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// TokenRefresher represents a callback method that you write; this method is called periodically
+// so you can refresh the token credential's value.
+type TokenRefresher func(credential TokenCredential) time.Duration
+
+// TokenCredential represents a token credential (which is also a pipeline.Factory).
+type TokenCredential interface {
+	Credential
+	Token() string
+	SetToken(newToken string)
+}
+
+// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
+// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
+// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
+// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
+// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
+// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
+// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
+// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
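+//
+// A hypothetical refresher (fetchTokenSomehow is an assumed caller-provided
+// helper, not part of this package):
+//
+//	tc := NewTokenCredential(initialToken, func(credential TokenCredential) time.Duration {
+//		newToken, err := fetchTokenSomehow()
+//		if err != nil {
+//			return 0 // stop refreshing; consider cancelling in-flight contexts
+//		}
+//		credential.SetToken(newToken)
+//		return 20 * time.Minute // ask to be called again in 20 minutes
+//	})
+//	p := NewPipeline(tc, PipelineOptions{})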
+func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
+	tc := &tokenCredential{}
+	tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
+	if tokenRefresher == nil {
+		return tc // If no callback specified, return the simple tokenCredential
+	}
+
+	tcwr := &tokenCredentialWithRefresh{token: tc}
+	tcwr.token.startRefresh(tokenRefresher)
+	runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
+		deadTC.token.stopRefresh()
+		deadTC.token = nil // Sanity (not really required)
+	})
+	return tcwr
+}
+
+// tokenCredentialWithRefresh is a wrapper over a token credential.
+// When this wrapper object gets GC'd, it stops the tokenCredential's timer
+// which allows the tokenCredential object to also be GC'd.
+type tokenCredentialWithRefresh struct {
+	token *tokenCredential
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredentialWithRefresh) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
+
+// SetToken changes the current token value
+func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f.token.New(next, po)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
+type tokenCredential struct {
+	token atomic.Value
+
+	// The members below are only used if the user specified a tokenRefresher callback function.
+	timer          *time.Timer
+	tokenRefresher TokenRefresher
+	lock           sync.Mutex
+	stopped        bool
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredential) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredential) Token() string { return f.token.Load().(string) }
+
+// SetToken changes the current token value
+func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
+
+// startRefresh calls refresh which immediately calls tokenRefresher
+// and then starts a timer to call tokenRefresher in the future.
+func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
+	f.tokenRefresher = tokenRefresher
+	f.stopped = false // In case the user calls startRefresh, stopRefresh, and then startRefresh again
+	f.refresh()
+}
+
+// refresh calls the user's tokenRefresher so they can refresh the token (by
+// calling SetToken) and then starts another timer (based on the returned duration)
+// in order to refresh the token again in the future.
+func (f *tokenCredential) refresh() {
+	d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
+	if d > 0 {               // If duration is 0 or negative, refresher wants to not be called again
+		f.lock.Lock()
+		if !f.stopped {
+			f.timer = time.AfterFunc(d, f.refresh)
+		}
+		f.lock.Unlock()
+	}
+}
+
+// stopRefresh stops any pending timer and sets stopped field to true to prevent
+// any new timer from starting.
+// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
+func (f *tokenCredential) stopRefresh() {
+	f.lock.Lock()
+	f.stopped = true
+	if f.timer != nil {
+		f.timer.Stop()
+	}
+	f.lock.Unlock()
+}
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		if request.URL.Scheme != "https" {
+			// HTTPS must be used, otherwise the tokens are at risk of being exposed
+			return nil, errors.New("token credentials require a URL using the https protocol scheme")
+		}
+		request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
+		return next.Do(ctx, request)
+	})
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go
new file mode 100644
index 000000000..0204924dd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go
@@ -0,0 +1,27 @@
+// +build linux darwin freebsd openbsd netbsd
+
+package azblob
+
+import (
+	"os"
+	"syscall"
+)
+
+type mmf []byte
+
+func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
+	prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
+	if writable {
+		prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
+	}
+	addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
+	return mmf(addr), err
+}
+
+func (m *mmf) unmap() {
+	err := syscall.Munmap(*m)
+	*m = nil
+	if err != nil {
+		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
+	}
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go
new file mode 100644
index 000000000..2743644e1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go
@@ -0,0 +1,38 @@
+package azblob
+
+import (
+	"os"
+	"reflect"
+	"syscall"
+	"unsafe"
+)
+
+type mmf []byte
+
+func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
+	prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
+	if writable {
+		prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
+	}
+	hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
+	if hMMF == 0 {
+		return nil, os.NewSyscallError("CreateFileMapping", errno)
+	}
+	defer syscall.CloseHandle(hMMF)
+	addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
+	if addr == 0 {
+		// Propagate the mapping failure instead of returning a zero-length view
+		return nil, os.NewSyscallError("MapViewOfFile", errno)
+	}
+	m := mmf{}
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
+	h.Data = addr
+	h.Len = length
+	h.Cap = h.Len
+	return m, nil
+}
+
+func (m *mmf) unmap() {
+	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
+	*m = mmf{}
+	err := syscall.UnmapViewOfFile(addr)
+	if err != nil {
+		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
+	}
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
new file mode 100644
index 000000000..f34cd0a7b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
@@ -0,0 +1,42 @@
+package azblob
+
+import (
+
"github.com/Azure/azure-pipeline-go/pipeline" +) + +// PipelineOptions is used to configure a request policy pipeline's retry policy and logging. +type PipelineOptions struct { + // Log configures the pipeline's logging infrastructure indicating what information is logged and where. + Log pipeline.LogOptions + + // Retry configures the built-in retry policy behavior. + Retry RetryOptions + + // RequestLog configures the built-in request logging policy. + RequestLog RequestLogOptions + + // Telemetry configures the built-in telemetry policy behavior. + Telemetry TelemetryOptions +} + +// NewPipeline creates a Pipeline using the specified credentials and options. +func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { + // Closest to API goes first; closest to the wire goes last + f := []pipeline.Factory{ + NewTelemetryPolicyFactory(o.Telemetry), + NewUniqueRequestIDPolicyFactory(), + NewRetryPolicyFactory(o.Retry), + } + + if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { + // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything + // NOTE: The credential's policy factory must appear close to the wire so it can sign any + // changes made by other factories (like UniqueRequestIDPolicyFactory) + f = append(f, c) + } + f = append(f, + pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked + NewRequestLogPolicyFactory(o.RequestLog)) + + return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log}) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go new file mode 100644 index 000000000..eb908e50e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go @@ -0,0 +1,182 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// RequestLogOptions configures the retry policy's behavior. +type RequestLogOptions struct { + // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified + // duration (-1=no logging; 0=default threshold). + LogWarningIfTryOverThreshold time.Duration +} + +func (o RequestLogOptions) defaults() RequestLogOptions { + if o.LogWarningIfTryOverThreshold == 0 { + // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/ + // But this monitors the time to get the HTTP response; NOT the time to download the response body. + o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds + } + return o +} + +// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options. 
+// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
+func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		// These variables are per-policy; shared by multiple calls to Do
+		var try int32
+		operationStart := time.Now() // If this is the 1st try, record the operation start time
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			try++ // The first try is #1 (not #0)
+
+			// Log the outgoing request as informational
+			if po.ShouldLog(pipeline.LogInfo) {
+				b := &bytes.Buffer{}
+				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
+				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
+				po.Log(pipeline.LogInfo, b.String())
+			}
+
+			// Set the time for this particular retry operation and then Do the operation.
+			tryStart := time.Now()
+			response, err = next.Do(ctx, request) // Make the request
+			tryEnd := time.Now()
+			tryDuration := tryEnd.Sub(tryStart)
+			opDuration := tryEnd.Sub(operationStart)
+
+			logLevel, forceLog := pipeline.LogInfo, false // Default logging information
+
+			// If the response took too long, we'll upgrade to warning.
+			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+				// Log a warning if the try duration exceeded the specified threshold
+				logLevel, forceLog = pipeline.LogWarning, true
+			}
+
+			if err == nil { // We got a response from the service
+				sc := response.Response().StatusCode
+				if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
+					logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed above) or any 5xx
+				} else {
+					// For other status codes, we leave the level as is.
+				}
+			} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
+				logLevel, forceLog = pipeline.LogError, true
+			}
+
+			if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
+				// We're going to log this; build the string to log
+				b := &bytes.Buffer{}
+				slow := ""
+				if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+					slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
+				}
+				fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
+				if err != nil { // This HTTP request did not get a response from the service
+					fmt.Fprint(b, "REQUEST ERROR\n")
+				} else {
+					if logLevel == pipeline.LogError {
+						fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
+					} else {
+						fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
+					}
+				}
+
+				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
+				if logLevel <= pipeline.LogError {
+					b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
+				}
+				msg := b.String()
+
+				if forceLog {
+					pipeline.ForceLog(logLevel, msg)
+				}
+				if shouldLog {
+					po.Log(logLevel, msg)
+				}
+			}
+			return response, err
+		}
+	})
+}
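The same FactoryFunc/PolicyFunc pattern works for user-defined policies. A sketch (the factory name and header are hypothetical, not part of this SDK):

// Assumed imports: "context" and "github.com/Azure/azure-pipeline-go/pipeline".
func NewStampHeaderPolicyFactory() pipeline.Factory {
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			request.Header.Set("x-my-app", "example") // hypothetical header
			return next.Do(ctx, request)              // hand off to the next policy in the chain
		}
	})
}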
+// redactSigQueryParam redacts the 'sig' query parameter in a URL's raw query to protect the secret.
+func redactSigQueryParam(rawQuery string) (bool, string) {
+	rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
+	sigFound := strings.Contains(rawQuery, "?sig=")
+	if !sigFound {
+		sigFound = strings.Contains(rawQuery, "&sig=")
+		if !sigFound {
+			return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
+		}
+	}
+	// [?|&]sig= found, redact its value
+	values, _ := url.ParseQuery(rawQuery)
+	for name := range values {
+		if strings.EqualFold(name, "sig") {
+			values[name] = []string{"REDACTED"}
+		}
+	}
+	return sigFound, values.Encode()
+}
+
+func prepareRequestForLogging(request pipeline.Request) *http.Request {
+	req := request
+	if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
+		// Make a copy so we don't destroy the query parameters we actually need to send in the request
+		req = request.Copy()
+		req.Request.URL.RawQuery = rawQuery
+	}
+
+	return prepareRequestForServiceLogging(req)
+}
+
+func stack() []byte {
+	buf := make([]byte, 1024)
+	for {
+		n := runtime.Stack(buf, false)
+		if n < len(buf) {
+			return buf[:n]
+		}
+		buf = make([]byte, 2*len(buf))
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+// This redact phase is useful for the blob and file services only. For other services,
+// this method can directly return request.Request.
+///////////////////////////////////////////////////////////////////////////////////////
+func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
+	req := request
+	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
+		req = request.Copy()
+		url, err := url.Parse(req.Header.Get(key))
+		if err == nil {
+			if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
+				url.RawQuery = rawQuery
+				req.Header.Set(xMsCopySourceHeader, url.String())
+			}
+		}
+	}
+	return req.Request
+}
+
+const xMsCopySourceHeader = "x-ms-copy-source"
+
+func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
+	for keyInHeader := range header {
+		if strings.EqualFold(keyInHeader, key) {
+			return true, keyInHeader
+		}
+	}
+	return false, ""
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
new file mode 100644
index 000000000..9f458496f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
@@ -0,0 +1,409 @@
+package azblob
+
+import (
+	"context"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+type RetryPolicy int32
+
+const (
+	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
+	RetryPolicyExponential RetryPolicy = 0
+
+	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
+	RetryPolicyFixed RetryPolicy = 1
+)
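A sketch of what the redaction above produces, usable inside this package (the query string is illustrative):

found, q := redactSigQueryParam("comp=list&sig=SECRETTOKEN")
fmt.Println(found, q) // true comp=list&sig=REDACTED (values.Encode() sorts keys)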
+// RetryOptions configures the retry policy's behavior.
+type RetryOptions struct {
+	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+	// A value of zero means that you accept our default policy.
+	Policy RetryPolicy
+
+	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
+	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
+	MaxTries int32
+
+	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
+	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
+	// of data, the default TryTimeout will probably not be sufficient. You should override this value
+	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
+	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
+	TryTimeout time.Duration
+
+	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
+	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
+	// with each retry up to a maximum specified by MaxRetryDelay.
+	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
+	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
+	// equal to or greater than RetryDelay.
+	RetryDelay time.Duration
+
+	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
+	// If you specify 0, then you must also specify 0 for RetryDelay.
+	MaxRetryDelay time.Duration
+
+	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
+	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
+	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
+	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
+	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
+}
+
+func (o RetryOptions) retryReadsFromSecondaryHost() string {
+	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
+	//return "" // This is for non-blob SDKs
+}
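A sketch of an explicit retry configuration (values are illustrative, not recommendations):

opts := azblob.RetryOptions{
	Policy:        azblob.RetryPolicyExponential,
	MaxTries:      5,                // 1 try + up to 4 retries
	TryTimeout:    2 * time.Minute,  // per-attempt cap
	RetryDelay:    2 * time.Second,  // base delay
	MaxRetryDelay: 60 * time.Second, // delay ceiling
}
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: opts})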
+func (o RetryOptions) defaults() RetryOptions {
+	// We assume the following:
+	// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
+	// 2. o.MaxTries >= 0
+	// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >= 0
+	// 4. o.RetryDelay <= o.MaxRetryDelay
+	// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0, or neither can be 0
+
+	IfDefault := func(current *time.Duration, desired time.Duration) {
+		if *current == time.Duration(0) {
+			*current = desired
+		}
+	}
+
+	// Set defaults if unspecified
+	if o.MaxTries == 0 {
+		o.MaxTries = 4
+	}
+	switch o.Policy {
+	case RetryPolicyExponential:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 4*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+
+	case RetryPolicyFixed:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 30*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+	}
+	return o
+}
+
+func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
+	pow := func(number int64, exponent int32) int64 { // pow is a nested helper function
+		var result int64 = 1
+		for n := int32(0); n < exponent; n++ {
+			result *= number
+		}
+		return result
+	}
+
+	delay := time.Duration(0)
+	switch o.Policy {
+	case RetryPolicyExponential:
+		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
+
+	case RetryPolicyFixed:
+		if try > 1 { // Any try after the 1st uses the fixed delay
+			delay = o.RetryDelay
+		}
+	}
+
+	// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
+	delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
+	if delay > o.MaxRetryDelay {
+		delay = o.MaxRetryDelay
+	}
+	return delay
+}
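Worked example of calcDelay with the exponential defaults (RetryDelay=4s, MaxRetryDelay=120s), before jitter:

try=1: (2^0 - 1) * 4s = 0s   (the first try is immediate)
try=2: (2^1 - 1) * 4s = 4s
try=3: (2^2 - 1) * 4s = 12s
try=4: (2^3 - 1) * 4s = 28s

Each value is then scaled by a random factor in [0.8, 1.3) and capped at MaxRetryDelay.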
+// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
+func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			// Before each try, we'll select either the primary or secondary URL.
+			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
+
+			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
+			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
+
+			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
+			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
+			// If using a secondary:
+			//    Even tries go against the primary; odd tries go against the secondary
+			//    For a primary, wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2))
+			//    If the secondary gets a 404, don't fail; retry, but future retries are only against the primary
+			//    When retrying against a secondary, ignore the retry count and wait (~1 second * random(0.8, 1.3))
+			for try := int32(1); try <= o.MaxTries; try++ {
+				logf("\n=====> Try=%d\n", try)
+
+				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd-numbered attempt.
+				tryingPrimary := !considerSecondary || (try%2 == 1)
+				// Select the correct host and delay
+				if tryingPrimary {
+					primaryTry++
+					delay := o.calcDelay(primaryTry)
+					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
+					time.Sleep(delay) // The 1st try returns 0 delay
+				} else {
+					delay := time.Second * time.Duration(rand.Float32()/2+0.8)
+					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
+					time.Sleep(delay) // Delay with some jitter before trying secondary
+				}
+
+				// Clone the original request to ensure that each try starts with the original (unmutated) request.
+				requestCopy := request.Copy()
+
+				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
+				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
+				// 1st try as for additional tries.
+				err = requestCopy.RewindBody()
+				if err != nil {
+					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
+				}
+
+				if !tryingPrimary {
+					requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
+				}
+
+				// Set the server-side timeout query parameter "timeout=[seconds]"
+				timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
+				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
+					t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
+					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
+					if t < timeout {
+						timeout = t
+					}
+					if timeout < 0 {
+						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
+					}
+					logf("TryTimeout adjusted to=%d sec\n", timeout)
+				}
+				q := requestCopy.Request.URL.Query()
+				q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
+				requestCopy.Request.URL.RawQuery = q.Encode()
+				logf("Url=%s\n", requestCopy.Request.URL.String())
+
+				// Set the time for this particular retry operation and then Do the operation.
+				tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
+				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
+				response, err = next.Do(tryCtx, requestCopy) // Make the request
+				/*err = improveDeadlineExceeded(err)
+				if err == nil {
+					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
+				}*/
+				logf("Err=%v, response=%v\n", err, response)
+
+				action := "" // This MUST get changed within the switch code below
+				switch {
+				case ctx.Err() != nil:
+					action = "NoRetry: Op timeout"
+				case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound:
+					// If the attempt was against the secondary & it returned a StatusNotFound (404), then
+					// the resource was not found. This may be due to replication delay. So, in this
+					// case, we'll never try the secondary again for this operation.
+					considerSecondary = false
+					action = "Retry: Secondary URL returned 404"
+				case err != nil:
+					// NOTE: the protocol responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
+					// Use ServiceCode to verify whether the error is related to the storage service side;
+					// ServiceCode is set only when an error related to the storage service occurred.
+					if stErr, ok := err.(StorageError); ok {
+						if stErr.Temporary() {
+							action = "Retry: StorageError with error service code and Temporary()"
+						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it after the protocol layer fixes the issue that a net.Error gets wrapped as a StorageError
+							action = "Retry: StorageError with success status code"
+						} else {
+							action = "NoRetry: StorageError not Temporary() and without retriable status code"
+						}
+					} else if netErr, ok := err.(net.Error); ok {
+						// Use a non-retriable list of net.Errors rather than a retriable list:
+						// some errors that should be retried (such as 'connection reset by peer' and
+						// 'transport connection broken') have no Temporary() implementation.
+						// So the SDK retries in most cases, unless the error is known for sure to be non-retriable.
+						if !isNotRetriable(netErr) {
+							action = "Retry: net.Error and not in the non-retriable list"
+						} else {
+							action = "NoRetry: net.Error and in the non-retriable list"
+						}
+					} else {
+						action = "NoRetry: unrecognized error"
+					}
+				default:
+					action = "NoRetry: successful HTTP request" // no error
+				}
+
+				logf("Action=%s\n", action)
+				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
+				if action[0] != 'R' { // Retry only if action starts with 'R'
+					if err != nil {
+						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
+					} else {
+						// We wrap the response's Body in our own wrapper tied to the last per-try context.
+						// So, when the user closes the Body, our per-try context gets cancelled too.
+						// Another option is to have the last policy do this wrapping for a per-retry context (not for the user's context).
+						if response == nil || response.Response() == nil {
+							// We return an error in the case where response or response.Response() is nil,
+							// as the response should not be nil if the request was sent and the operation executed successfully.
+							// Another option is to execute the cancel function when response or response.Response() is nil,
+							// as in this case the current per-try has nothing left to do.
+							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
+						}
+						response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
+					}
+					break // Don't retry
+				}
+				if response != nil && response.Response() != nil && response.Response().Body != nil {
+					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
+					body := response.Response().Body
+					io.Copy(ioutil.Discard, body)
+					body.Close()
+				}
+				// If retrying, cancel the current per-try timeout context
+				tryCancel()
+			}
+			return response, err // Not retryable or too many retries; return the last response/error
+		}
+	})
+}
+
+// contextCancelReadCloser helps to invoke the context's cancelFunc properly when the ReadCloser is closed.
+type contextCancelReadCloser struct {
+	cf   context.CancelFunc
+	body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+	return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+	err := rc.body.Close()
+	if rc.cf != nil {
+		rc.cf()
+	}
+	return err
+}
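A sketch of the body-wrapping pattern above in isolation, using only the standard library plus the contextCancelReadCloser defined here (the function name and timeout are hypothetical):

func doWithTryTimeout(client *http.Client, req *http.Request) (*http.Response, error) {
	ctx, cancel := context.WithTimeout(req.Context(), 30*time.Second) // per-try deadline
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		cancel() // no body will be read; release the timer now
		return nil, err
	}
	// Closing the body later also cancels the per-try context, as in the policy above.
	resp.Body = &contextCancelReadCloser{cf: cancel, body: resp.Body}
	return resp, nil
}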
+// isNotRetriable checks whether the provided net.Error is non-retriable.
+func isNotRetriable(errToParse net.Error) bool {
+	// No error, so this is NOT retriable.
+	if errToParse == nil {
+		return true
+	}
+
+	// The error is either temporary or a timeout so it IS retriable (i.e. not "not retriable").
+	if errToParse.Temporary() || errToParse.Timeout() {
+		return false
+	}
+
+	genericErr := error(errToParse)
+
+	// From here on, all the errors are neither Temporary() nor Timeout().
+	switch err := errToParse.(type) {
+	case *net.OpError:
+		// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
+		if err.Err == nil {
+			return true
+		}
+		genericErr = err.Err
+	}
+
+	switch genericErr.(type) {
+	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
+		// If the error is one of the ones listed, then it is NOT retriable.
+		return true
+	}
+
+	// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
+	// This could happen when a metadata key or value is invalid. (RoundTrip in transport.go)
+	if strings.Contains(genericErr.Error(), "invalid header field") {
+		return true
+	}
+
+	// Assume the error is retriable.
+	return false
+}
+
+var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
+
+func isSuccessStatusCode(resp *http.Response) bool {
+	if resp == nil {
+		return false
+	}
+	for _, i := range successStatusCodes {
+		if i == resp.StatusCode {
+			return true
+		}
+	}
+	return false
+}
+
+// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
+var logf = func(format string, a ...interface{}) {}
+
+// Use this version to see the retry method's code path (import "fmt")
+//var logf = fmt.Printf
+
+/*
+type deadlineExceededReadCloser struct {
+	r io.ReadCloser
+}
+
+func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
+	n, err := 0, io.EOF
+	if r.r != nil {
+		n, err = r.r.Read(p)
+	}
+	return n, improveDeadlineExceeded(err)
+}
+func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
+	// For an HTTP request, the ReadCloser MUST also implement seek
+	// For an HTTP response, Seek MUST not be called (or this will panic)
+	o, err := r.r.(io.Seeker).Seek(offset, whence)
+	return o, improveDeadlineExceeded(err)
+}
+func (r *deadlineExceededReadCloser) Close() error {
+	if c, ok := r.r.(io.Closer); ok {
+		c.Close()
+	}
+	return nil
+}
+
+// timeoutError is the internal struct that implements our richer timeout error.
+type deadlineExceeded struct {
+	responseError
+}
+
+var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
+
+// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
+func improveDeadlineExceeded(cause error) error {
+	// If cause is not DeadlineExceeded, return the same error passed in.
+	if cause != context.DeadlineExceeded {
+		return cause
+	}
+	// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
+	return &deadlineExceeded{
+		responseError: responseError{
+			ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
+		},
+	}
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *deadlineExceeded) Error() string {
+	return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
+}
+*/
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go
new file mode 100644
index 000000000..608e1051c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go
@@ -0,0 +1,51 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// TelemetryOptions configures the telemetry policy's behavior.
+type TelemetryOptions struct {
+	// Value is a string prepended to each request's User-Agent and sent to the service.
+	// The service records the user-agent in logs for diagnostics and tracking of client requests.
+	Value string
+}
+
+// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
+// which add telemetry information to outgoing HTTP requests.
+func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
+	b := &bytes.Buffer{}
+	b.WriteString(o.Value)
+	if b.Len() > 0 {
+		b.WriteRune(' ')
+	}
+	fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
+	telemetryValue := b.String()
+
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			request.Header.Set("User-Agent", telemetryValue)
+			return next.Do(ctx, request)
+		}
+	})
+}
+
+// NOTE: the ONLY function that should write to this variable is this func
+var platformInfo = func() string {
+	// Format: Azure-Storage/version (runtime; os type and version)
+	// Example: Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
+	operatingSystem := runtime.GOOS // Default OS string
+	switch operatingSystem {
+	case "windows":
+		operatingSystem = os.Getenv("OS") // Get more specific OS information
+	case "linux": // accept default OS info
+	case "freebsd": //  accept default OS info
+	}
+	return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
+}()
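A caller-side sketch: with TelemetryOptions{Value: "myapp/1.0"}, every request carries a User-Agent resembling "myapp/1.0 Azure-Storage/&lt;lib version&gt; (go1.x; linux)" (the exact version strings depend on the build environment):

p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
	Telemetry: azblob.TelemetryOptions{Value: "myapp/1.0"},
})
_ = p // use p when constructing a ServiceURL/ContainerURL/BlobURL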
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
new file mode 100644
index 000000000..a75c7d1d2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
@@ -0,0 +1,24 @@
+package azblob
+
+import (
+	"context"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
+// that sets the request's x-ms-client-request-id header if it doesn't already exist.
+func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		// This is the Policy's Do method:
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			id := request.Header.Get(xMsClientRequestID)
+			if id == "" { // Add a unique request ID if the caller didn't specify one already
+				request.Header.Set(xMsClientRequestID, newUUID().String())
+			}
+			return next.Do(ctx, request)
+		}
+	})
+}
+
+const xMsClientRequestID = "x-ms-client-request-id"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
new file mode 100644
index 000000000..cb4e0e4f0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
@@ -0,0 +1,113 @@
+package azblob
+
+import (
+	"context"
+	"io"
+	"net"
+	"net/http"
+)
+
+// CountToEnd indicates that the rest of the resource (from the offset to its end) should be retrieved.
+const CountToEnd = 0
+
+// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
+type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
+
+// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
+// that should be used to make an HTTP GET request.
+type HTTPGetterInfo struct {
+	// Offset specifies the start offset that should be used when
+	// creating the HTTP GET request's Range header
+	Offset int64
+
+	// Count specifies the count of bytes that should be used to calculate
+	// the end offset when creating the HTTP GET request's Range header
+	Count int64
+
+	// ETag specifies the resource's etag that should be used when creating
+	// the HTTP GET request's If-Match header
+	ETag ETag
+}
+
+// RetryReaderOptions contains properties which can help to decide when to do retry.
+type RetryReaderOptions struct {
+	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
+	// while reading from a RetryReader. A value of zero means that no additional HTTP
+	// GET requests will be made.
+	MaxRetryRequests   int
+	doInjectError      bool
+	doInjectErrorRound int
+}
+
+// retryReader implements io.ReadCloser.
+// retryReader reads from a response body; if a retriable network error occurs while reading,
+// it invokes the user-defined getter (per the RetryReaderOptions) to obtain a new response
+// and continues the overall read from that new response's stream.
+type retryReader struct {
+	ctx             context.Context
+	response        *http.Response
+	info            HTTPGetterInfo
+	countWasBounded bool
+	o               RetryReaderOptions
+	getter          HTTPGetter
+}
+
+// NewRetryReader creates a retry reader.
+func NewRetryReader(ctx context.Context, initialResponse *http.Response,
+	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
+	return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
+}
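A caller-side sketch of how this reader is typically obtained, assuming this vendored version's Download API, an already-configured blobURL, and dst as any io.Writer:

get, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
if err != nil {
	return err
}
// Body wraps the raw stream in a retry reader that re-issues ranged GETs
// (up to MaxRetryRequests) if the connection drops mid-download.
body := get.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
defer body.Close()
_, err = io.Copy(dst, body)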
+func (s *retryReader) Read(p []byte) (n int, err error) {
+	for try := 0; ; try++ {
+		//fmt.Println(try) // Uncomment for debugging.
+		if s.countWasBounded && s.info.Count == CountToEnd {
+			// User specified an original count and the remaining bytes are 0; return 0, EOF
+			return 0, io.EOF
+		}
+
+		if s.response == nil { // We don't have a response stream to read from, try to get one.
+			response, err := s.getter(s.ctx, s.info)
+			if err != nil {
+				return 0, err
+			}
+			// Successful GET; this is the network stream we'll read from.
+			s.response = response
+		}
+		n, err := s.response.Body.Read(p) // Read from the stream
+
+		// Injection mechanism for testing.
+		if s.o.doInjectError && try == s.o.doInjectErrorRound {
+			err = &net.DNSError{IsTemporary: true}
+		}
+
+		// We successfully read data or reached EOF.
+		if err == nil || err == io.EOF {
+			s.info.Offset += int64(n) // Increment the start offset in case we need to make a new HTTP request in the future
+			if s.info.Count != CountToEnd {
+				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
+			}
+			return n, err // Return the result to the caller
+		}
+		s.Close()        // Error, close stream
+		s.response = nil // Our stream is no longer good
+
+		// Check the retry count and error code, and decide whether to retry.
+		if try >= s.o.MaxRetryRequests {
+			return n, err // All retries exhausted
+		}
+
+		if _, ok := err.(net.Error); ok {
+			continue // Loop around and try to get and read from a new stream.
+		}
+		return n, err // Not retryable, just return
+	}
+}
+
+func (s *retryReader) Close() error {
+	if s.response != nil && s.response.Body != nil {
+		return s.response.Body.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
new file mode 100644
index 000000000..860e3a2c5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
@@ -0,0 +1,218 @@
+package azblob
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSASSignatureValues struct {
+	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
+	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
+	IPRange       IPRange     `param:"sip"`
+	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
+	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
+}
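A caller-side sketch of generating an account SAS with these values (account name and URL are placeholders; the credential is assumed to have been constructed from the account name and key elsewhere):

var cred *azblob.SharedKeyCredential // e.g. built via azblob.NewSharedKeyCredential
sasQP, err := azblob.AccountSASSignatureValues{
	Protocol:      azblob.SASProtocolHTTPS,
	ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
	Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(), // "rl"
	Services:      azblob.AccountSASServices{Blob: true}.String(),                // "b"
	ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(), // "co"
}.NewSASQueryParameters(cred)
if err == nil {
	shareableURL := "https://myaccount.blob.core.windows.net/?" + sasQP.Encode()
	_ = shareableURL
}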
+// NewSASQueryParameters uses an account's shared key credential to sign these signature values, producing
+// the proper SAS query parameters.
+func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	perms := &AccountSASPermissions{}
+	if err := perms.Parse(v.Permissions); err != nil {
+		return SASQueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That's right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+	p := SASQueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+	return p, nil
+}
+
+// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
+type AccountSASPermissions struct {
+	Read, Write, Delete, List, Add, Create, Update, Process bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Permissions field.
+func (p AccountSASPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASPermissions's fields from a string.
+func (p *AccountSASPermissions) Parse(s string) error {
+	*p = AccountSASPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		default:
+			return fmt.Errorf("Invalid permission character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
+type AccountSASServices struct {
+	Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Services field.
+func (s AccountSASServices) String() string {
+	var buffer bytes.Buffer
+	if s.Blob {
+		buffer.WriteRune('b')
+	}
+	if s.Queue {
+		buffer.WriteRune('q')
+	}
+	if s.File {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASServices' fields from a string.
+func (a *AccountSASServices) Parse(s string) error {
+	*a = AccountSASServices{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'b':
+			a.Blob = true
+		case 'q':
+			a.Queue = true
+		case 'f':
+			a.File = true
+		default:
+			return fmt.Errorf("Invalid service character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
+type AccountSASResourceTypes struct {
+	Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's ResourceTypes field.
+func (rt AccountSASResourceTypes) String() string {
+	var buffer bytes.Buffer
+	if rt.Service {
+		buffer.WriteRune('s')
+	}
+	if rt.Container {
+		buffer.WriteRune('c')
+	}
+	if rt.Object {
+		buffer.WriteRune('o')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASResourceType's fields from a string.
+func (rt *AccountSASResourceTypes) Parse(s string) error {
+	*rt = AccountSASResourceTypes{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 's':
+			rt.Service = true
+		case 'c':
+			rt.Container = true
+		case 'o':
+			rt.Object = true
+		default:
+			return fmt.Errorf("Invalid resource type: '%v'", r)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
new file mode 100644
index 000000000..db10171e7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
@@ -0,0 +1,211 @@
+package azblob
+
+import (
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// SASVersion indicates the SAS version.
+const SASVersion = ServiceVersion
+
+type SASProtocol string
+
+const (
+	// SASProtocolHTTPS can be specified for a SAS protocol
+	SASProtocolHTTPS SASProtocol = "https"
+
+	// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
+	SASProtocolHTTPSandHTTP SASProtocol = "https,http"
+)
+
+// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
+// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
+func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
+	}
+	return ss, se
+}
+
+// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using an XxxSASSignatureValues type.
+//
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+	version       string      `param:"sv"`
+	services      string      `param:"ss"`
+	resourceTypes string      `param:"srt"`
+	protocol      SASProtocol `param:"spr"`
+	startTime     time.Time   `param:"st"`
+	expiryTime    time.Time   `param:"se"`
+	ipRange       IPRange     `param:"sip"`
+	identifier    string      `param:"si"`
+	resource      string      `param:"sr"`
+	permissions   string      `param:"sp"`
+	signature     string      `param:"sig"`
+}
+
+func (p *SASQueryParameters) Version() string {
+	return p.version
+}
+
+func (p *SASQueryParameters) Services() string {
+	return p.services
+}
+func (p *SASQueryParameters) ResourceTypes() string {
+	return p.resourceTypes
+}
+func (p *SASQueryParameters) Protocol() SASProtocol {
+	return p.protocol
+}
+func (p *SASQueryParameters) StartTime() time.Time {
+	return p.startTime
+}
+func (p *SASQueryParameters) ExpiryTime() time.Time {
+	return p.expiryTime
+}
+
+func (p *SASQueryParameters) IPRange() IPRange {
+	return p.ipRange
+}
+
+func (p *SASQueryParameters) Identifier() string {
+	return p.identifier
+}
+
+func (p *SASQueryParameters) Resource() string {
+	return p.resource
+}
+func (p *SASQueryParameters) Permissions() string {
+	return p.permissions
+}
+
+func (p *SASQueryParameters) Signature() string {
+	return p.signature
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
+	p := SASQueryParameters{}
+	for k, v := range values {
+		val := v[0]
+		isSASKey := true
+		switch strings.ToLower(k) {
+		case "sv":
+			p.version = val
+		case "ss":
+			p.services = val
+		case "srt":
+			p.resourceTypes = val
+		case "spr":
+			p.protocol = SASProtocol(val)
+		case "st":
+			p.startTime, _ = time.Parse(SASTimeFormat, val)
+		case "se":
+			p.expiryTime, _ = time.Parse(SASTimeFormat, val)
+		case "sip":
+			dashIndex := strings.Index(val, "-")
+			if dashIndex == -1 {
+				p.ipRange.Start = net.ParseIP(val)
+			} else {
+				p.ipRange.Start = net.ParseIP(val[:dashIndex])
+				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+			}
+		case "si":
+			p.identifier = val
+		case "sr":
+			p.resource = val
+		case "sp":
+			p.permissions = val
+		case "sig":
+			p.signature = val
+		default:
+			isSASKey = false // We didn't recognize the query parameter
+		}
+		if isSASKey && deleteSASParametersFromValues {
+			delete(values, k)
+		}
+	}
+	return p
+}
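A package-internal sketch of the parsing helper above (the URL and its SAS values are illustrative):

u, _ := url.Parse("https://myaccount.blob.core.windows.net/c?sv=2018-03-28&sp=rl&sig=abc")
values := u.Query()
sas := newSASQueryParameters(values, true)    // true: strip the SAS keys out of values
fmt.Println(sas.Version(), sas.Permissions()) // 2018-03-28 rl
fmt.Println(sas.Encode())                     // re-encodes just the SAS components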
+// addToValues adds the SAS components to the specified query parameters map.
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
+	if p.version != "" {
+		v.Add("sv", p.version)
+	}
+	if p.services != "" {
+		v.Add("ss", p.services)
+	}
+	if p.resourceTypes != "" {
+		v.Add("srt", p.resourceTypes)
+	}
+	if p.protocol != "" {
+		v.Add("spr", string(p.protocol))
+	}
+	if !p.startTime.IsZero() {
+		v.Add("st", p.startTime.Format(SASTimeFormat))
+	}
+	if !p.expiryTime.IsZero() {
+		v.Add("se", p.expiryTime.Format(SASTimeFormat))
+	}
+	if len(p.ipRange.Start) > 0 {
+		v.Add("sip", p.ipRange.String())
+	}
+	if p.identifier != "" {
+		v.Add("si", p.identifier)
+	}
+	if p.resource != "" {
+		v.Add("sr", p.resource)
+	}
+	if p.permissions != "" {
+		v.Add("sp", p.permissions)
+	}
+	if p.signature != "" {
+		v.Add("sig", p.signature)
+	}
+	return v
+}
+
+// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *SASQueryParameters) Encode() string {
+	v := url.Values{}
+	p.addToValues(v)
+	return v.Encode()
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go
new file mode 100644
index 000000000..765beb241
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go
@@ -0,0 +1,131 @@
+package azblob
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
+
+const (
+	// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
+	ServiceCodeNone ServiceCodeType = ""
+
+	// ServiceCodeAccountAlreadyExists means the specified account already exists.
+	ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists"
+
+	// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
+	ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated"
+
+	// ServiceCodeAccountIsDisabled means the specified account is disabled (403).
+	ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled"
+
+	// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
+	ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed"
+
+	// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
+	ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
+
+	// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
+	ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
+
+	// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
+	ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
+
+	// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled, write operations are not allowed, or the account being accessed does not have sufficient permissions to execute this operation (403).
+	ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
+
+	// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
+	ServiceCodeInternalError ServiceCodeType = "InternalError"
+
+	// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of the Authorization header (400).
+	ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
+
+	// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
+	ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
+
+	// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
+	ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
+
+	// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
+	ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
+
+	// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
+	ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
+
+	// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
+	ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
+
+	// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
+	ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
+
+	// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
+	ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
+
+	// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
+	ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
+
+	// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
+	ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
+
+	// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
+	ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
+
+	// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
+	ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
+
+	// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
+	ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
+
+	// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
+	ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
+
+	// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
+	ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
+
+	// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
+	ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
+
+	// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
+	ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
+
+	// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
+	ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
+
+	// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
+	ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
+
+	// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
+	ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
+
+	// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
+	ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
+
+	// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
+	ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
+
+	// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
+	ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
+
+	// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
+	ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
+
+	// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
+	ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
+
+	// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
+	ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
+
+	// ServiceCodeResourceNotFound means the specified resource does not exist (404).
+	ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
+
+	// ServiceCodeServerBusy means the server is currently unable to receive requests, ingress/egress is over the account limit, or operations per second is over the account limit; please retry your request (503).
+	ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
+
+	// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
+	ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
+
+	// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
+	ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
+
+	// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
+	ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
+
+	// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
+	ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
+)
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
new file mode 100644
index 000000000..e7872a8a3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
@@ -0,0 +1,111 @@
+package azblob
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"sort"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+func init() {
+	// wire up our custom error handling constructor
+	responseErrorFactory = newStorageError
+}
+
+// ServiceCodeType is a string identifying a storage service error.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+type ServiceCodeType string
+
+// StorageError identifies a responder-generated network or response parsing error.
+type StorageError interface {
+	// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
+	ResponseError
+
+	// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
+	ServiceCode() ServiceCodeType
+}
+
+// storageError is the internal struct that implements the public StorageError interface.
+type storageError struct {
+	responseError
+	serviceCode ServiceCodeType
+	details     map[string]string
+}
+
+// newStorageError creates an error object that implements the error interface.
+func newStorageError(cause error, response *http.Response, description string) error {
+	return &storageError{
+		responseError: responseError{
+			ErrorNode:   pipeline.ErrorNode{}.Initialize(cause, 3),
+			response:    response,
+			description: description,
+		},
+		serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
+	}
+}
+
+// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
+func (e *storageError) ServiceCode() ServiceCodeType {
+	return e.serviceCode
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *storageError) Error() string {
+	b := &bytes.Buffer{}
+	fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
+	fmt.Fprintf(b, "Description=%s, Details: ", e.description)
+	if len(e.details) == 0 {
+		b.WriteString("(none)\n")
+	} else {
+		b.WriteRune('\n')
+		keys := make([]string, 0, len(e.details))
+		// Alphabetize the details
+		for k := range e.details {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			fmt.Fprintf(b, "   %s: %+v\n", k, e.details[k])
+		}
+	}
+	req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
+	pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
+	return e.ErrorNode.Error(b.String())
+}
+
+// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
+func (e *storageError) Temporary() bool {
+	if e.response != nil {
+		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
+			return true
+		}
+	}
+	return e.ErrorNode.Temporary()
+}
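A caller-side sketch of how these errors are typically inspected (assumes a configured containerURL and ctx):

_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
	if stgErr, ok := err.(azblob.StorageError); ok {
		switch {
		case stgErr.ServiceCode() == azblob.ServiceCodeResourceAlreadyExists:
			// Benign: the resource already exists; carry on.
		case stgErr.Temporary():
			// 500/503 (or a Temporary() network error): a candidate for retry.
		default:
			// Non-retriable service failure.
		}
	} else {
		// No HTTP response was received (e.g. DNS failure); handle separately.
	}
}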
+// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
+func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	tokName := ""
+	var t xml.Token
+	for t, err = d.Token(); err == nil; t, err = d.Token() {
+		switch tt := t.(type) {
+		case xml.StartElement:
+			tokName = tt.Name.Local
+			break
+		case xml.CharData:
+			switch tokName {
+			case "Message":
+				e.description = string(tt)
+			default:
+				if e.details == nil {
+					e.details = map[string]string{}
+				}
+				e.details[tokName] = string(tt)
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
new file mode 100644
index 000000000..d7b2507e4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
@@ -0,0 +1,64 @@
+package azblob
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// httpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
+// which has an offset but a zero-value count indicates from the offset to the resource's end.
+type httpRange struct {
+	offset int64
+	count  int64
+}
+
+func (r httpRange) pointers() *string {
+	if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
+		return nil // No specified range
+	}
+	endOffset := "" // if count == CountToEnd (0)
+	if r.count > 0 {
+		endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
+	}
+	dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
+	return &dataRange
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+		return 0, nil
+	}
+
+	err := validateSeekableStreamAt0(body)
+	if err != nil {
+		return 0, err
+	}
+
+	count, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, errors.New("body stream must be seekable")
+	}
+
+	body.Seek(0, io.SeekStart)
+	return count, nil
+}
+
+// return an error if body is not a valid seekable stream at 0
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+	if body == nil { // nil bodies are "logically" seekable to 0
+		return nil
+	}
+	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+		// Help detect programmer error
+		if err != nil {
+			return errors.New("body stream must be seekable")
+		}
+		return errors.New("body stream must be set to position 0")
+	}
+	return nil
+}
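A quick sketch of the Range header values pointers produces (usable inside this package):

fmt.Println(httpRange{}.pointers()) // <nil>: the zero value means the whole resource, so no Range header
r := httpRange{offset: 512, count: 1024}
fmt.Println(*r.pointers()) // bytes=512-1535 (end offset = 512 + 1024 - 1)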
+	rand.Read(u[:])
+	u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
+
+	var version byte = 4
+	u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
+	return
+}
+
+// String returns an unparsed version of the generated UUID sequence.
+func (u uuid) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
+// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
+func parseUUID(uuidStr string) uuid {
+	char := func(hexString string) byte {
+		i, _ := strconv.ParseUint(hexString, 16, 8)
+		return byte(i)
+	}
+	if uuidStr[0] == '{' {
+		uuidStr = uuidStr[1:] // Skip over the '{'
+	}
+	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
+	// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
+	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
+	uuidVal := uuid{
+		char(uuidStr[0:2]),
+		char(uuidStr[2:4]),
+		char(uuidStr[4:6]),
+		char(uuidStr[6:8]),
+
+		char(uuidStr[9:11]),
+		char(uuidStr[11:13]),
+
+		char(uuidStr[14:16]),
+		char(uuidStr[16:18]),
+
+		char(uuidStr[19:21]),
+		char(uuidStr[21:23]),
+
+		char(uuidStr[24:26]),
+		char(uuidStr[26:28]),
+		char(uuidStr[28:30]),
+		char(uuidStr[30:32]),
+		char(uuidStr[32:34]),
+		char(uuidStr[34:36]),
+	}
+	return uuidVal
+}
+
+func (u uuid) bytes() []byte {
+	return u[:]
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
new file mode 100644
index 000000000..6b3779c0e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package azblob allows you to manipulate Azure Storage containers and blob objects.
+
+URL Types
+
+The most common types you'll work with are the XxxURL types. The methods of these types make requests
+against the Azure Storage Service.
+
+ - ServiceURL's methods perform operations on a storage account.
+ - ContainerURL's methods perform operations on an account's container.
+ - BlockBlobURL's methods perform operations on a container's block blob.
+ - AppendBlobURL's methods perform operations on a container's append blob.
+ - PageBlobURL's methods perform operations on a container's page blob.
+ - BlobURL's methods perform operations on a container's blob regardless of the blob's type.
+
+Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
+request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed.
+The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
+
+Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
+an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
+URL but it shares the same pipeline as the parent ServiceURL object.
+
+To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
+To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
+respectively. These three types are all identical except for the methods they expose; each type exposes the methods
+relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
+this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
+the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
+can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
+
+If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
+object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
+with the same URL as the original but with the specified pipeline.
+
+Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
+XxxURL objects share a lot of system resources, making them very efficient.
+
+All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
+transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
+example of how to deal with errors.
+
+URL and Shared Access Signature Manipulation
+
+The library includes a BlobURLParts type for deconstructing and reconstructing URLs. You can also use the following types
+for generating and parsing Shared Access Signatures (SAS):
+ - Use the AccountSASSignatureValues type to create a SAS for a storage account.
+ - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
+ - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
+
+To generate a SAS, you must use the SharedKeyCredential type.
+
+Credentials
+
+When creating a request pipeline, you must specify one of this package's credential types.
+ - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
+ - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
+ to generate Shared Access Signatures.
+
+HTTP Request Policy Factories
+
+This package defines several request policy factories for use with the pipeline package.
+Most applications will not use these factories directly; instead, the NewPipeline
+function creates these factories, initializes them (via the PipelineOptions type)
+and returns a pipeline object for use by the XxxURL objects.
+
+However, for advanced scenarios, developers can access these policy factories directly,
+or even create their own, and then construct a custom pipeline to affect the HTTP
+requests and responses performed by the XxxURL objects. For example, developers can
+introduce their own logging, random failures, request recording & playback for fast
+testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
+possibilities are endless!
+
+Below are the request pipeline policy factory functions that are provided with this
+package:
+ - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
+ - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
+ - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
+ - NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
+
+Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
+*/
+package azblob
+
+// TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
new file mode 100644
index 000000000..89f29bdca
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
@@ -0,0 +1,235 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
+type appendBlobClient struct {
+	managementClient
+}
+
+// newAppendBlobClient creates an instance of the appendBlobClient client.
+func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
+	return appendBlobClient{newManagementClient(url, p)}
+}
+
+// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
+// supported only on version 2015-02-21 or later.
+//
+// body is initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
+// error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
+// information, see Setting
+// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
+// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
+// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
+// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
+// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
+// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
+// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
+// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
+// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
+// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
+// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
+// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockResponse), err +} + +// appendBlockPreparer prepares the AppendBlock request. +func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// appendBlockResponder handles the response to the AppendBlock request. 
+func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err +} + +// Create the Create Append Blob operation creates a new append blob. +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
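+//
+// Editor's illustrative sketch (not AutoRest-generated code): creating an empty append
+// blob and appending one block through this internal client. "abc" stands for an
+// appendBlobClient built via newAppendBlobClient and "ctx" for a caller-supplied
+// context; every optional parameter is left nil. A bytes.Reader satisfies the
+// io.ReadSeeker requirement and is already positioned at 0, as validateSeekableStreamAt0
+// expects.
+//
+//    payload := bytes.NewReader([]byte("hello"))
+//    _, err := abc.Create(ctx, 0, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//    if err == nil {
+//        _, err = abc.AppendBlock(ctx, payload, int64(payload.Len()), nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//    }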
+func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "AppendBlob") + return req, nil +} + +// createResponder handles the response to the Create 
request. +func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go new file mode 100644 index 000000000..aa8e0c68a --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go @@ -0,0 +1,1261 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// blobClient is the client for the Blob methods of the Azblob service. +type blobClient struct { + managementClient +} + +// newBlobClient creates an instance of the blobClient client. +func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { + return blobClient{newManagementClient(url, p)} +} + +// AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a +// destination blob with zero length and full metadata. +// +// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is +// the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAbortCopyFromURLResponse), err +} + +// abortCopyFromURLPreparer prepares the AbortCopyFromURL request. 
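+//
+// Editor's illustrative sketch (not generated code): aborting a pending copy using the
+// copy ID returned in the x-ms-copy-id header of the original Copy Blob call; "bc",
+// "ctx", and copyID stand for a configured blobClient, a caller-supplied context, and
+// the saved copy identifier.
+//
+//    _, err := bc.AbortCopyFromURL(ctx, copyID, nil, nil, nil)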
+func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("copyid", copyID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "copy") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-copy-action", "abort") + return req, nil +} + +// abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. 
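+//
+// Editor's illustrative sketch (not generated code): acquiring a 15-second lease with a
+// caller-chosen GUID; "bc" and "ctx" are assumed. newUUID (defined in zc_uuid.go above)
+// supplies a valid GUID string.
+//
+//    duration := int32(15)
+//    proposed := newUUID().String()
+//    _, err := bc.AcquireLease(ctx, nil, &duration, &proposed, nil, nil, nil, nil, nil)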
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. 
If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
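+//
+// Editor's illustrative sketch (not generated code): breaking a lease immediately by
+// passing a zero break period; "bc" and "ctx" are assumed.
+//
+//    period := int32(0)
+//    _, err := bc.BreakLease(ctx, nil, &period, nil, nil, nil, nil, nil)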
+func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. 
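+//
+// Editor's illustrative sketch (not generated code): swapping the active lease ID for a
+// freshly generated one; "bc", "ctx", and currentLeaseID are assumed.
+//
+//    _, err := bc.ChangeLease(ctx, currentLeaseID, newUUID().String(), nil, nil, nil, nil, nil, nil)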
+func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. +func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. 
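+//
+// Editor's illustrative sketch (not generated code): taking a snapshot and attaching one
+// metadata pair to it; "bc" and "ctx" are assumed. Per the comment above, supplying
+// metadata prevents the source blob's metadata from being copied to the snapshot.
+//
+//    _, err := bc.CreateSnapshot(ctx, nil, map[string]string{"origin": "nightlybackup"}, nil, nil, nil, nil, nil, nil)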
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCreateSnapshotResponse), err +} + +// createSnapshotPreparer prepares the CreateSnapshot request. +func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "snapshot") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createSnapshotResponder handles the response to the CreateSnapshot request. +func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err +} + +// Delete if the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently +// removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is +// deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob +// or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] +// (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently +// removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it +// is permanently removed. 
Use the List Blobs API and specify the "include=deleted" query parameter to discover which
+// blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob.
+// All other operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code of 404
+// (ResourceNotFound).
+//
+// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
+// retrieve. For more information on working with blob snapshots, see Creating
+// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one
+// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's
+// snapshots and not the blob itself. ifModifiedSince is specify this header value to operate only on a blob if it has
+// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
+// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
+// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled.
+func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) {
+	if err := validate([]validation{
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*BlobDeleteResponse), err
+}
+
+// deletePreparer prepares the Delete request.
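+//
+// Editor's illustrative sketch (not generated code): deleting the base blob when it has
+// no snapshots, so the x-ms-delete-snapshots header is omitted via
+// DeleteSnapshotsOptionNone; "bc" and "ctx" are assumed.
+//
+//    _, err := bc.Delete(ctx, nil, nil, nil, DeleteSnapshotsOptionNone, nil, nil, nil, nil, nil)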
+func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if deleteSnapshots != DeleteSnapshotsOptionNone { + req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteResponse{rawResponse: resp.Response()}, err +} + +// Download the Download operation reads or downloads a blob from the system, including its metadata and properties. +// You can also call Download to read a snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for +// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value +// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this +// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify +// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only +// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
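+//
+// Editor's illustrative sketch (not generated code): downloading the first KiB by
+// formatting the x-ms-range header with httpRange.pointers from zc_util_validate.go;
+// "bc" and "ctx" are assumed.
+//
+//    rng := httpRange{offset: 0, count: 1024}.pointers() // *rng == "bytes=0-1023"
+//    dr, err := bc.Download(ctx, nil, nil, rng, nil, nil, nil, nil, nil, nil, nil)
+//    // On success dr wraps the raw HTTP response; its body streams the requested range.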
+func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*downloadResponse), err +} + +// downloadPreparer prepares the Download request. +func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if rangeGetContentMD5 != nil { + req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// downloadResponder handles the response to the Download request. +func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) + if resp == nil { + return nil, err + } + return &downloadResponse{rawResponse: resp.Response()}, err +} + +// GetAccountInfo returns the sku name and account kind +func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. 
+func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system +// properties for the blob. It does not return the content of the blob. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. 
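+//
+// Editor's illustrative sketch (not generated code): a HEAD request for the blob's
+// properties and metadata, with no snapshot, lease, or access conditions; "bc" and
+// "ctx" are assumed.
+//
+//    _, err := bc.GetProperties(ctx, nil, nil, nil, nil, nil, nil, nil, nil)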
+func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("HEAD", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. 
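+//
+// Editor's illustrative sketch (not generated code): releasing a held lease so other
+// clients may immediately acquire one; "bc", "ctx", and leaseID are assumed.
+//
+//    _, err := bc.ReleaseLease(ctx, leaseID, nil, nil, nil, nil, nil, nil)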
+func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. +func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. 
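+//
+// Editor's illustrative sketch (not generated code): renewing a lease before its
+// duration elapses; "bc", "ctx", and leaseID are assumed.
+//
+//    _, err := bc.RenewLease(ctx, leaseID, nil, nil, nil, nil, nil, nil)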
+func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 +// is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual +// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If +// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. +// Set the blob's content language. 
If specified, this property is stored with the blob and returned with a read +// request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentDisposition is +// optional. Sets the blob's Content-Disposition header. requestID is provides a client-generated, opaque value with a +// 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobContentDisposition, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetHTTPHeadersResponse), err +} + +// setHTTPHeadersPreparer prepares the SetHTTPHeaders request. 
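+//
+// Editorial note: an illustrative sketch of calling SetHTTPHeaders (not
+// generated code); exampleSetHTTPHeaders and its header values are
+// hypothetical. The service documentation notes that Set Blob Properties
+// replaces these HTTP properties as a set, so values left nil here are cleared
+// rather than preserved.
+//
+//	func exampleSetHTTPHeaders(ctx context.Context, client blobClient) error {
+//		cacheControl, contentType := "max-age=3600", "application/json"
+//		_, err := client.SetHTTPHeaders(ctx, nil, &cacheControl, &contentType,
+//			nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//		return err
+//	}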
+func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. +func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more +// name-value pairs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the +// resource's lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier +// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. 
This operation does not update the blob's ETag. +// +// tier is indicates the tier to be set on the blob. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if +// specified, the operation only succeeds if the resource's lease is active and matches this ID. +func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (*BlobSetTierResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTierPreparer(tier, timeout, requestID, leaseID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTierResponse), err +} + +// setTierPreparer prepares the SetTier request. +func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "tier") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-access-tier", string(tier)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + return req, nil +} + +// setTierResponder handles the response to the SetTier request. +func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTierResponse{rawResponse: resp.Response()}, err +} + +// StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing +// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate +// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header +// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify +// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate +// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobStartCopyFromURLResponse), err +} + +// startCopyFromURLPreparer prepares the StartCopyFromURL request. 
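+//
+// Editorial note: a minimal, hypothetical sketch (exampleStartCopy is not part
+// of the package). The call only starts the copy; progress is reported through
+// the copy-status properties surfaced on subsequent GetProperties responses.
+//
+//	func exampleStartCopy(ctx context.Context, client blobClient, sourceURL string) error {
+//		_, err := client.StartCopyFromURL(ctx, sourceURL, nil, nil,
+//			nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//		return err
+//	}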
+func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// startCopyFromURLResponder handles the response to the StartCopyFromURL request. +func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// Undelete undelete a blob that was previously soft deleted +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.undeletePreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobUndeleteResponse), err +} + +// undeletePreparer prepares the Undelete request. 
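+//
+// Editorial note: Undelete takes no parameters beyond the optional timeout and
+// request ID, so a call (hypothetical sketch, inside this package) reduces to:
+//
+//	_, err := client.Undelete(ctx, nil, nil)
+//
+// The target account must have soft delete enabled for the operation to
+// restore anything.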
+func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// undeleteResponder handles the response to the Undelete request. +func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobUndeleteResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go new file mode 100644 index 000000000..a8105b54f --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go @@ -0,0 +1,494 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blockBlobClient is the client for the BlockBlob methods of the Azblob service. +type blockBlobClient struct { + managementClient +} + +// newBlockBlobClient creates an instance of the blockBlobClient client. +func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { + return blockBlobClient{newManagementClient(url, p)} +} + +// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the +// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior +// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, +// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from +// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the +// block, whichever list it may belong to. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. +// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the +// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If +// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An +// MD5 hash of the blob content. 
Note that this hash is not validated, as the hashes for the individual blocks were +// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobCommitBlockListResponse), err +} + +// commitBlockListPreparer prepares the CommitBlockList request. 
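+//
+// Editorial note: a hedged sketch of the usual two-step flow, staging a block
+// and then committing it; exampleStageAndCommit and its literals are
+// hypothetical, and the Latest field is assumed to carry the block IDs as in
+// this package's generated models.
+//
+//	func exampleStageAndCommit(ctx context.Context, client blockBlobClient, data []byte) error {
+//		// Block IDs must be base64-encoded and equally sized for a given blob.
+//		id := base64.StdEncoding.EncodeToString([]byte("block-00000"))
+//		if _, err := client.StageBlock(ctx, id, int64(len(data)), bytes.NewReader(data), nil, nil, nil, nil); err != nil {
+//			return err
+//		}
+//		// "Latest" commits whichever copy of each block was uploaded last.
+//		_, err := client.CommitBlockList(ctx, BlockLookupList{Latest: []string{id}},
+//			nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//		return err
+//	}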
+func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(blocks) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// commitBlockListResponder handles the response to the CommitBlockList request. +func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err +} + +// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block +// blob +// +// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists +// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob +// snapshot to retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
+// limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
+	if err := validate([]validation{
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*BlockList), err
+}
+
+// getBlockListPreparer prepares the GetBlockList request.
+func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+	req, err := pipeline.NewRequest("GET", client.url, nil)
+	if err != nil {
+		return req, pipeline.NewError(err, "failed to create request")
+	}
+	params := req.URL.Query()
+	if snapshot != nil && len(*snapshot) > 0 {
+		params.Set("snapshot", *snapshot)
+	}
+	params.Set("blocklisttype", string(listType))
+	if timeout != nil {
+		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+	}
+	params.Set("comp", "blocklist")
+	req.URL.RawQuery = params.Encode()
+	if leaseID != nil {
+		req.Header.Set("x-ms-lease-id", *leaseID)
+	}
+	req.Header.Set("x-ms-version", ServiceVersion)
+	if requestID != nil {
+		req.Header.Set("x-ms-client-request-id", *requestID)
+	}
+	return req, nil
+}
+
+// getBlockListResponder handles the response to the GetBlockList request.
+func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK)
+	if resp == nil {
+		return nil, err
+	}
+	result := &BlockList{rawResponse: resp.Response()}
+	if err != nil {
+		return result, err
+	}
+	defer resp.Response().Body.Close()
+	b, err := ioutil.ReadAll(resp.Response().Body)
+	if err != nil {
+		return result, err
+	}
+	if len(b) > 0 {
+		b = removeBOM(b)
+		err = xml.Unmarshal(b, result)
+		if err != nil {
+			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+		}
+	}
+	return result, nil
+}
+
+// StageBlock the Stage Block operation creates a new block to be committed as part of a blob
+//
+// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
+// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
+// same size for each block. contentLength is the length of the request. body is initial data; body will be closed upon
+// successful return. Callers should ensure closure when receiving an error. transactionalContentMD5 is specify the
+// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
+// seconds. For more information, see Setting
+// Timeouts for Blob Service Operations.
leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockResponse), err +} + +// stageBlockPreparer prepares the StageBlock request. +func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// stageBlockResponder handles the response to the StageBlock request. +func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err +} + +// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. +// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the +// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. 
For
+// more information, see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
+// limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
+	if err := validate([]validation{
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, requestID)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*BlockBlobStageBlockFromURLResponse), err
+}
+
+// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
+func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+	req, err := pipeline.NewRequest("PUT", client.url, nil)
+	if err != nil {
+		return req, pipeline.NewError(err, "failed to create request")
+	}
+	params := req.URL.Query()
+	params.Set("blockid", blockID)
+	if timeout != nil {
+		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+	}
+	params.Set("comp", "block")
+	req.URL.RawQuery = params.Encode()
+	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+	req.Header.Set("x-ms-copy-source", sourceURL)
+	if sourceRange != nil {
+		req.Header.Set("x-ms-source-range", *sourceRange)
+	}
+	if sourceContentMD5 != nil {
+		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
+	}
+	if leaseID != nil {
+		req.Header.Set("x-ms-lease-id", *leaseID)
+	}
+	req.Header.Set("x-ms-version", ServiceVersion)
+	if requestID != nil {
+		req.Header.Set("x-ms-client-request-id", *requestID)
+	}
+	return req, nil
+}
+
+// stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
+func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+	if resp == nil {
+		return nil, err
+	}
+	io.Copy(ioutil.Discard, resp.Response().Body)
+	resp.Response().Body.Close()
+	return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err
+}
+
+// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
+// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
+// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
+// block blob, use the Put Block List operation.
+//
+// body is initial data; body will be closed upon successful return.
Callers should ensure closure when receiving an
+// error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
+// information, see Setting
+// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified,
+// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
+// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
+// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
+// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
+// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
+// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
+// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
+// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
+// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
+// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
+// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
+// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
+// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
+// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
+// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
+// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
+// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled.
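+//
+// Editorial note: an illustrative single-shot upload; exampleUpload and its
+// values are hypothetical, not generated code. For payloads larger than a
+// single request comfortably carries, the StageBlock/CommitBlockList pair
+// above is the intended path.
+//
+//	func exampleUpload(ctx context.Context, client blockBlobClient, data []byte) error {
+//		contentType := "application/octet-stream"
+//		_, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)), nil,
+//			&contentType, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//		return err
+//	}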
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobUploadResponse), err +} + +// uploadPreparer prepares the Upload request. +func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + 
req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// uploadResponder handles the response to the Upload request. +func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go new file mode 100644 index 000000000..b42a79b15 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go @@ -0,0 +1,38 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "net/url" +) + +const ( + // ServiceVersion specifies the version of the operations used in this package. + ServiceVersion = "2018-03-28" +) + +// managementClient is the base client for Azblob. +type managementClient struct { + url url.URL + p pipeline.Pipeline +} + +// newManagementClient creates an instance of the managementClient client. +func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { + return managementClient{ + url: url, + p: p, + } +} + +// URL returns a copy of the URL for this client. +func (mc managementClient) URL() url.URL { + return mc.url +} + +// Pipeline returns the pipeline for this client. +func (mc managementClient) Pipeline() pipeline.Pipeline { + return mc.p +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go new file mode 100644 index 000000000..599e8118c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go @@ -0,0 +1,1037 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// containerClient is the client for the Container methods of the Azblob service. +type containerClient struct { + managementClient +} + +// newContainerClient creates an instance of the containerClient client. +func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { + return containerClient{newManagementClient(url, p)} +} + +// AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. 
The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
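+//
+// Editorial note: a hypothetical sketch, not generated code; a break period of
+// zero asks the service to end the lease immediately rather than letting the
+// remaining duration run out.
+//
+//	breakPeriod := int32(0)
+//	_, err := client.BreakLease(ctx, nil, &breakPeriod, nil, nil, nil)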
+func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. +func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. 
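+//
+// A hand-written sketch of the ChangeLease call handled above (editorial, not
+// generated code; both GUIDs are placeholders, and c is assumed to be a
+// containerClient value from this package):
+//
+//	current := "ca761232-ed42-11ce-bacd-00aa0057b223"
+//	proposed := "0f8fad5b-d9cb-469f-a165-70867728950e"
+//	resp, err := c.ChangeLease(ctx, current, proposed, nil, nil, nil, nil)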
+func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// Create creates a new container under the specified account. If the container with the same name already exists, the +// operation fails +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be +// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(timeout, metadata, access, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createResponder handles the response to the Create request. 
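+//
+// A hand-written sketch of the Create call handled above (editorial, not
+// generated code; c is assumed to be a containerClient value from this
+// package): one metadata pair plus blob-level public read access.
+//
+//	resp, err := c.Create(ctx, nil, map[string]string{"owner": "backup"}, PublicAccessBlob, nil)
+//	if err != nil {
+//		return err // ContainerAlreadyExists surfaces as an error here
+//	}
+//	_ = resp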
+func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerCreateResponse{rawResponse: resp.Response()}, err +} + +// Delete operation marks the specified container for deletion. The container and any blobs contained within it are +// later deleted during garbage collection +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerDeleteResponse), err +} + +// deletePreparer prepares the Delete request. +func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. 
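+//
+// A hand-written sketch of the Delete call handled above (editorial, not
+// generated code; c is assumed to be a containerClient value from this
+// package): deleting a leased container by passing its active lease ID.
+//
+//	leaseID := "ca761232-ed42-11ce-bacd-00aa0057b223" // placeholder GUID
+//	resp, err := c.Delete(ctx, nil, &leaseID, nil, nil, nil)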
+func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerDeleteResponse{rawResponse: resp.Response()}, err +} + +// GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data +// may be accessed publicly. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SignedIdentifiers), err +} + +// getAccessPolicyPreparer prepares the GetAccessPolicy request. +func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getAccessPolicyResponder handles the response to the GetAccessPolicy request. 
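+//
+// A hand-written sketch of the GetAccessPolicy call handled above (editorial,
+// not generated code; c is assumed to be a containerClient value from this
+// package; Items is the same SignedIdentifiers field that
+// setAccessPolicyPreparer marshals below):
+//
+//	ids, err := c.GetAccessPolicy(ctx, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, id := range ids.Items {
+//		_ = id // inspect each stored access policy
+//	}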
+func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &SignedIdentifiers{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetAccountInfo returns the sku name and account kind +func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. +func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. 
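+//
+// A hand-written sketch of the GetProperties call declared above (editorial,
+// not generated code; c is assumed to be a containerClient value from this
+// package; the response's header accessors are generated in
+// zz_generated_models.go, added later in this diff):
+//
+//	props, err := c.GetProperties(ctx, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	_ = props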
+func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+	req, err := pipeline.NewRequest("GET", client.url, nil)
+	if err != nil {
+		return req, pipeline.NewError(err, "failed to create request")
+	}
+	params := req.URL.Query()
+	if timeout != nil {
+		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+	}
+	params.Set("restype", "container")
+	req.URL.RawQuery = params.Encode()
+	if leaseID != nil {
+		req.Header.Set("x-ms-lease-id", *leaseID)
+	}
+	req.Header.Set("x-ms-version", ServiceVersion)
+	if requestID != nil {
+		req.Header.Set("x-ms-client-request-id", *requestID)
+	}
+	return req, nil
+}
+
+// getPropertiesResponder handles the response to the GetProperties request.
+func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK)
+	if resp == nil {
+		return nil, err
+	}
+	io.Copy(ioutil.Discard, resp.Response().Body)
+	resp.Response().Body.Close()
+	return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err
+}
+
+// ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container
+//
+// prefix is filters the results to return only blobs whose name begins with the specified prefix. marker is a
+// string value that identifies the portion of the list of blobs to be returned with the next listing operation.
+// The operation returns the NextMarker value within the response body if the listing operation did not return all
+// blobs remaining to be listed with the current page. The NextMarker value can be used as the value for the
+// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the
+// client. maxresults is specifies the maximum number of blobs to return. If the request does not specify
+// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the
+// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the
+// remainder of the results. For this reason, it is possible that the service will return fewer results than specified
+// by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to
+// include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
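+//
+// A hand-written sketch (editorial, not generated code; c is assumed to be a
+// containerClient value from this package): fetching one segment of up to 100
+// blobs together with their metadata.
+//
+//	max := int32(100)
+//	include := []ListBlobsIncludeItemType{ListBlobsIncludeItemMetadata}
+//	seg, err := c.ListBlobFlatSegment(ctx, nil, nil, &max, include, nil, nil)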
+func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) {
+	if err := validate([]validation{
+		{targetValue: maxresults,
+			constraints: []constraint{{target: "maxresults", name: null, rule: false,
+				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*ListBlobsFlatSegmentResponse), err
+}
+
+// listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request.
+func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) {
+	req, err := pipeline.NewRequest("GET", client.url, nil)
+	if err != nil {
+		return req, pipeline.NewError(err, "failed to create request")
+	}
+	params := req.URL.Query()
+	if prefix != nil && len(*prefix) > 0 {
+		params.Set("prefix", *prefix)
+	}
+	if marker != nil && len(*marker) > 0 {
+		params.Set("marker", *marker)
+	}
+	if maxresults != nil {
+		params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+	}
+	if include != nil && len(include) > 0 {
+		params.Set("include", joinConst(include, ","))
+	}
+	if timeout != nil {
+		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+	}
+	params.Set("restype", "container")
+	params.Set("comp", "list")
+	req.URL.RawQuery = params.Encode()
+	req.Header.Set("x-ms-version", ServiceVersion)
+	if requestID != nil {
+		req.Header.Set("x-ms-client-request-id", *requestID)
+	}
+	return req, nil
+}
+
+// listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request.
+func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK)
+	if resp == nil {
+		return nil, err
+	}
+	result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()}
+	if err != nil {
+		return result, err
+	}
+	defer resp.Response().Body.Close()
+	b, err := ioutil.ReadAll(resp.Response().Body)
+	if err != nil {
+		return result, err
+	}
+	if len(b) > 0 {
+		b = removeBOM(b)
+		err = xml.Unmarshal(b, result)
+		if err != nil {
+			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+		}
+	}
+	return result, nil
+}
+
+// ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container
+//
+// delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response
+// body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the
+// delimiter character. The delimiter may be a single character or a string. prefix is filters the results to return
+// only blobs whose name begins with the specified prefix. marker is a string value that identifies the portion of
+// the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value
+// within the response body if the listing operation did not return all blobs remaining to be listed with the
+// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request
+// the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of
+// blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
+// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will
+// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the
+// service will return fewer results than specified by maxresults, or than the default of 5000. include is include this
+// parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed
+// in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) {
+	if err := validate([]validation{
+		{targetValue: maxresults,
+			constraints: []constraint{{target: "maxresults", name: null, rule: false,
+				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*ListBlobsHierarchySegmentResponse), err
+}
+
+// listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request.
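+//
+// A hand-written sketch (editorial, not generated code; c is assumed to be a
+// containerClient value from this package): a virtual-directory style listing
+// under "photos/" using "/" as the delimiter.
+//
+//	prefix := "photos/"
+//	seg, err := c.ListBlobHierarchySegment(ctx, "/", &prefix, nil, nil, nil, nil, nil)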
+func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + params.Set("delimiter", delimiter) + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. +func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
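+//
+// A hand-written sketch (editorial, not generated code; c is assumed to be a
+// containerClient value from this package):
+//
+//	resp, err := c.ReleaseLease(ctx, leaseID, nil, nil, nil, nil)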
+func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. +func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
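+//
+// A hand-written sketch (editorial, not generated code; c is assumed to be a
+// containerClient value from this package and the GUID is a placeholder):
+// acquiring a 15-second lease under a caller-chosen ID, then renewing it.
+//
+//	duration := int32(15) // 15-60 seconds, or -1 for an infinite lease
+//	id := "ca761232-ed42-11ce-bacd-00aa0057b223"
+//	if _, err := c.AcquireLease(ctx, nil, &duration, &id, nil, nil, nil); err != nil {
+//		return err
+//	}
+//	_, err := c.RenewLease(ctx, id, nil, nil, nil, nil)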
+func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a +// container may be accessed publicly. +// +// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and +// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
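+//
+// A hand-written sketch (editorial, not generated code; c is assumed to be a
+// containerClient value from this package): passing a nil ACL stores no
+// access policies while opening the container for container-level public
+// reads.
+//
+//	resp, err := c.SetAccessPolicy(ctx, nil, nil, nil, PublicAccessContainer, nil, nil, nil)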
+func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetAccessPolicyResponse), err +} + +// setAccessPolicyPreparer prepares the SetAccessPolicy request. +func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setAccessPolicyResponder handles the response to the SetAccessPolicy request. +func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with +// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to +// the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the +// specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go new file mode 100644 index 000000000..5c0434f50 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go @@ -0,0 +1,4796 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
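+
+// Editorial note, not generated code: the Marker type declared below drives
+// segmented listing. Its zero value means "start from the beginning", so
+//
+//	var m Marker
+//	m.NotDone() // true: the initial request has not been made yet
+//
+// and NotDone only reports false once the service returns an empty NextMarker.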
+ +import ( + "encoding/base64" + "encoding/xml" + "errors" + "io" + "net/http" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +// ETag is an entity tag. +type ETag string + +const ( + // ETagNone represents an empty entity tag. + ETagNone ETag = "" + + // ETagAny matches any entity tag. + ETagAny ETag = "*" +) + +// Metadata contains metadata key/value pairs. +type Metadata map[string]string + +const mdPrefix = "x-ms-meta-" + +const mdPrefixLen = len(mdPrefix) + +// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. +func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if *md == nil { + *md = Metadata{} + } + (*md)[tokName] = string(tt) + break + } + } + return nil +} + +// Marker represents an opaque value used in paged responses. +type Marker struct { + val *string +} + +// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true +// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from +// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only +// after the service has returned the final result portion. +func (m Marker) NotDone() bool { + return m.val == nil || *m.val != "" +} + +// UnmarshalXML implements the xml.Unmarshaler interface for Marker. +func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var out string + err := d.DecodeElement(&out, &start) + m.val = &out + return err +} + +// concatenates a slice of const values with the specified separator between each item +func joinConst(s interface{}, sep string) string { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + panic("s wasn't a slice or array") + } + ss := make([]string, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + ss = append(ss, v.Index(i).String()) + } + return strings.Join(ss, sep) +} + +func validateError(err error) { + if err != nil { + panic(err) + } +} + +// AccessTierType enumerates the values for access tier type. +type AccessTierType string + +const ( + // AccessTierArchive ... + AccessTierArchive AccessTierType = "Archive" + // AccessTierCool ... + AccessTierCool AccessTierType = "Cool" + // AccessTierHot ... + AccessTierHot AccessTierType = "Hot" + // AccessTierNone represents an empty AccessTierType. + AccessTierNone AccessTierType = "" + // AccessTierP10 ... + AccessTierP10 AccessTierType = "P10" + // AccessTierP20 ... + AccessTierP20 AccessTierType = "P20" + // AccessTierP30 ... + AccessTierP30 AccessTierType = "P30" + // AccessTierP4 ... + AccessTierP4 AccessTierType = "P4" + // AccessTierP40 ... + AccessTierP40 AccessTierType = "P40" + // AccessTierP50 ... + AccessTierP50 AccessTierType = "P50" + // AccessTierP6 ... + AccessTierP6 AccessTierType = "P6" +) + +// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. +func PossibleAccessTierTypeValues() []AccessTierType { + return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6} +} + +// AccountKindType enumerates the values for account kind type. 
+type AccountKindType string + +const ( + // AccountKindBlobStorage ... + AccountKindBlobStorage AccountKindType = "BlobStorage" + // AccountKindNone represents an empty AccountKindType. + AccountKindNone AccountKindType = "" + // AccountKindStorage ... + AccountKindStorage AccountKindType = "Storage" + // AccountKindStorageV2 ... + AccountKindStorageV2 AccountKindType = "StorageV2" +) + +// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. +func PossibleAccountKindTypeValues() []AccountKindType { + return []AccountKindType{AccountKindBlobStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} +} + +// ArchiveStatusType enumerates the values for archive status type. +type ArchiveStatusType string + +const ( + // ArchiveStatusNone represents an empty ArchiveStatusType. + ArchiveStatusNone ArchiveStatusType = "" + // ArchiveStatusRehydratePendingToCool ... + ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" + // ArchiveStatusRehydratePendingToHot ... + ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. +func PossibleArchiveStatusTypeValues() []ArchiveStatusType { + return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} +} + +// BlobType enumerates the values for blob type. +type BlobType string + +const ( + // BlobAppendBlob ... + BlobAppendBlob BlobType = "AppendBlob" + // BlobBlockBlob ... + BlobBlockBlob BlobType = "BlockBlob" + // BlobNone represents an empty BlobType. + BlobNone BlobType = "" + // BlobPageBlob ... + BlobPageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns an array of possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} +} + +// BlockListType enumerates the values for block list type. +type BlockListType string + +const ( + // BlockListAll ... + BlockListAll BlockListType = "all" + // BlockListCommitted ... + BlockListCommitted BlockListType = "committed" + // BlockListNone represents an empty BlockListType. + BlockListNone BlockListType = "" + // BlockListUncommitted ... + BlockListUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} +} + +// CopyStatusType enumerates the values for copy status type. +type CopyStatusType string + +const ( + // CopyStatusAborted ... + CopyStatusAborted CopyStatusType = "aborted" + // CopyStatusFailed ... + CopyStatusFailed CopyStatusType = "failed" + // CopyStatusNone represents an empty CopyStatusType. + CopyStatusNone CopyStatusType = "" + // CopyStatusPending ... + CopyStatusPending CopyStatusType = "pending" + // CopyStatusSuccess ... + CopyStatusSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} +} + +// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. 
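+//
+// As with every enum in this file, the empty-string ...None value means
+// "unset": preparers only emit the matching header or query parameter when the
+// value differs from None (compare createPreparer, which skips
+// x-ms-blob-public-access for PublicAccessNone).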
+type DeleteSnapshotsOptionType string + +const ( + // DeleteSnapshotsOptionInclude ... + DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" + // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. + DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" + // DeleteSnapshotsOptionOnly ... + DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} +} + +// GeoReplicationStatusType enumerates the values for geo replication status type. +type GeoReplicationStatusType string + +const ( + // GeoReplicationStatusBootstrap ... + GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" + // GeoReplicationStatusLive ... + GeoReplicationStatusLive GeoReplicationStatusType = "live" + // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. + GeoReplicationStatusNone GeoReplicationStatusType = "" + // GeoReplicationStatusUnavailable ... + GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" +) + +// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. +func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { + return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} +} + +// LeaseDurationType enumerates the values for lease duration type. +type LeaseDurationType string + +const ( + // LeaseDurationFixed ... + LeaseDurationFixed LeaseDurationType = "fixed" + // LeaseDurationInfinite ... + LeaseDurationInfinite LeaseDurationType = "infinite" + // LeaseDurationNone represents an empty LeaseDurationType. + LeaseDurationNone LeaseDurationType = "" +) + +// PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} +} + +// LeaseStateType enumerates the values for lease state type. +type LeaseStateType string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseStateType = "available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseStateType = "breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseStateType = "broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseStateType = "expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseStateType = "leased" + // LeaseStateNone represents an empty LeaseStateType. + LeaseStateNone LeaseStateType = "" +) + +// PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} +} + +// LeaseStatusType enumerates the values for lease status type. +type LeaseStatusType string + +const ( + // LeaseStatusLocked ... + LeaseStatusLocked LeaseStatusType = "locked" + // LeaseStatusNone represents an empty LeaseStatusType. + LeaseStatusNone LeaseStatusType = "" + // LeaseStatusUnlocked ... 
+ LeaseStatusUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} +} + +// ListBlobsIncludeItemType enumerates the values for list blobs include item type. +type ListBlobsIncludeItemType string + +const ( + // ListBlobsIncludeItemCopy ... + ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" + // ListBlobsIncludeItemDeleted ... + ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" + // ListBlobsIncludeItemMetadata ... + ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" + // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. + ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" + // ListBlobsIncludeItemSnapshots ... + ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" + // ListBlobsIncludeItemUncommittedblobs ... + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" +) + +// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. +func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { + return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemUncommittedblobs} +} + +// ListContainersIncludeType enumerates the values for list containers include type. +type ListContainersIncludeType string + +const ( + // ListContainersIncludeMetadata ... + ListContainersIncludeMetadata ListContainersIncludeType = "metadata" + // ListContainersIncludeNone represents an empty ListContainersIncludeType. + ListContainersIncludeNone ListContainersIncludeType = "" +) + +// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ListContainersIncludeMetadata, ListContainersIncludeNone} +} + +// PublicAccessType enumerates the values for public access type. +type PublicAccessType string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccessType = "blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccessType = "container" + // PublicAccessNone represents an empty PublicAccessType. + PublicAccessNone PublicAccessType = "" +) + +// PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// SequenceNumberActionType enumerates the values for sequence number action type. +type SequenceNumberActionType string + +const ( + // SequenceNumberActionIncrement ... + SequenceNumberActionIncrement SequenceNumberActionType = "increment" + // SequenceNumberActionMax ... + SequenceNumberActionMax SequenceNumberActionType = "max" + // SequenceNumberActionNone represents an empty SequenceNumberActionType. + SequenceNumberActionNone SequenceNumberActionType = "" + // SequenceNumberActionUpdate ... 
+ SequenceNumberActionUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} +} + +// SkuNameType enumerates the values for sku name type. +type SkuNameType string + +const ( + // SkuNameNone represents an empty SkuNameType. + SkuNameNone SkuNameType = "" + // SkuNamePremiumLRS ... + SkuNamePremiumLRS SkuNameType = "Premium_LRS" + // SkuNameStandardGRS ... + SkuNameStandardGRS SkuNameType = "Standard_GRS" + // SkuNameStandardLRS ... + SkuNameStandardLRS SkuNameType = "Standard_LRS" + // SkuNameStandardRAGRS ... + SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS" + // SkuNameStandardZRS ... + SkuNameStandardZRS SkuNameType = "Standard_ZRS" +) + +// PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type. +func PossibleSkuNameTypeValues() []SkuNameType { + return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS} +} + +// StorageErrorCodeType enumerates the values for storage error code type. +type StorageErrorCodeType string + +const ( + // StorageErrorCodeAccountAlreadyExists ... + StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" + // StorageErrorCodeAccountBeingCreated ... + StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" + // StorageErrorCodeAccountIsDisabled ... + StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" + // StorageErrorCodeAppendPositionConditionNotMet ... + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" + // StorageErrorCodeAuthenticationFailed ... + StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" + // StorageErrorCodeBlobAlreadyExists ... + StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" + // StorageErrorCodeBlobArchived ... + StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" + // StorageErrorCodeBlobBeingRehydrated ... + StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" + // StorageErrorCodeBlobNotArchived ... + StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" + // StorageErrorCodeBlobNotFound ... + StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" + // StorageErrorCodeBlobOverwritten ... + StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" + // StorageErrorCodeBlobTierInadequateForContentLength ... + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" + // StorageErrorCodeBlockCountExceedsLimit ... + StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" + // StorageErrorCodeBlockListTooLong ... + StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" + // StorageErrorCodeCannotChangeToLowerTier ... + StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" + // StorageErrorCodeCannotVerifyCopySource ... + StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" + // StorageErrorCodeConditionHeadersNotSupported ... 
+ StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" + // StorageErrorCodeConditionNotMet ... + StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" + // StorageErrorCodeContainerAlreadyExists ... + StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" + // StorageErrorCodeContainerBeingDeleted ... + StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" + // StorageErrorCodeContainerDisabled ... + StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" + // StorageErrorCodeContainerNotFound ... + StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" + // StorageErrorCodeContentLengthLargerThanTierLimit ... + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" + // StorageErrorCodeCopyAcrossAccountsNotSupported ... + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" + // StorageErrorCodeCopyIDMismatch ... + StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" + // StorageErrorCodeEmptyMetadataKey ... + StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" + // StorageErrorCodeFeatureVersionMismatch ... + StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" + // StorageErrorCodeIncrementalCopyBlobMismatch ... + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" + // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" + // StorageErrorCodeInfiniteLeaseDurationRequired ... + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" + // StorageErrorCodeInsufficientAccountPermissions ... + StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" + // StorageErrorCodeInternalError ... + StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" + // StorageErrorCodeInvalidAuthenticationInfo ... + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" + // StorageErrorCodeInvalidBlobOrBlock ... + StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" + // StorageErrorCodeInvalidBlobTier ... + StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" + // StorageErrorCodeInvalidBlobType ... + StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" + // StorageErrorCodeInvalidBlockID ... + StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" + // StorageErrorCodeInvalidBlockList ... + StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" + // StorageErrorCodeInvalidHeaderValue ... + StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" + // StorageErrorCodeInvalidHTTPVerb ... + StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" + // StorageErrorCodeInvalidInput ... + StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" + // StorageErrorCodeInvalidMd5 ... 
+ StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" + // StorageErrorCodeInvalidMetadata ... + StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" + // StorageErrorCodeInvalidOperation ... + StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" + // StorageErrorCodeInvalidPageRange ... + StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" + // StorageErrorCodeInvalidQueryParameterValue ... + StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" + // StorageErrorCodeInvalidRange ... + StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" + // StorageErrorCodeInvalidResourceName ... + StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" + // StorageErrorCodeInvalidSourceBlobType ... + StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" + // StorageErrorCodeInvalidSourceBlobURL ... + StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" + // StorageErrorCodeInvalidURI ... + StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" + // StorageErrorCodeInvalidVersionForPageBlobOperation ... + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" + // StorageErrorCodeInvalidXMLDocument ... + StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" + // StorageErrorCodeInvalidXMLNodeValue ... + StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" + // StorageErrorCodeLeaseAlreadyBroken ... + StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" + // StorageErrorCodeLeaseAlreadyPresent ... + StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" + // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" + // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" + // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" + // StorageErrorCodeLeaseIDMissing ... + StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" + // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" + // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" + // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" + // StorageErrorCodeLeaseLost ... + StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" + // StorageErrorCodeLeaseNotPresentWithBlobOperation ... + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" + // StorageErrorCodeLeaseNotPresentWithContainerOperation ... + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" + // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... 
+ StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" + // StorageErrorCodeMaxBlobSizeConditionNotMet ... + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" + // StorageErrorCodeMd5Mismatch ... + StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" + // StorageErrorCodeMetadataTooLarge ... + StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" + // StorageErrorCodeMissingContentLengthHeader ... + StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" + // StorageErrorCodeMissingRequiredHeader ... + StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" + // StorageErrorCodeMissingRequiredQueryParameter ... + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" + // StorageErrorCodeMissingRequiredXMLNode ... + StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" + // StorageErrorCodeMultipleConditionHeadersNotSupported ... + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" + // StorageErrorCodeNone represents an empty StorageErrorCodeType. + StorageErrorCodeNone StorageErrorCodeType = "" + // StorageErrorCodeNoPendingCopyOperation ... + StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" + // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + // StorageErrorCodeOperationTimedOut ... + StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" + // StorageErrorCodeOutOfRangeInput ... + StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" + // StorageErrorCodeOutOfRangeQueryParameterValue ... + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" + // StorageErrorCodePendingCopyOperation ... + StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" + // StorageErrorCodePreviousSnapshotCannotBeNewer ... + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" + // StorageErrorCodePreviousSnapshotNotFound ... + StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" + // StorageErrorCodePreviousSnapshotOperationNotSupported ... + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" + // StorageErrorCodeRequestBodyTooLarge ... + StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" + // StorageErrorCodeRequestURLFailedToParse ... + StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" + // StorageErrorCodeResourceAlreadyExists ... + StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" + // StorageErrorCodeResourceNotFound ... + StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" + // StorageErrorCodeResourceTypeMismatch ... + StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" + // StorageErrorCodeSequenceNumberConditionNotMet ... 
+ StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" + // StorageErrorCodeSequenceNumberIncrementTooLarge ... + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" + // StorageErrorCodeServerBusy ... + StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" + // StorageErrorCodeSnaphotOperationRateExceeded ... + StorageErrorCodeSnaphotOperationRateExceeded StorageErrorCodeType = "SnaphotOperationRateExceeded" + // StorageErrorCodeSnapshotCountExceeded ... + StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" + // StorageErrorCodeSnapshotsPresent ... + StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" + // StorageErrorCodeSourceConditionNotMet ... + StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" + // StorageErrorCodeSystemInUse ... + StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" + // StorageErrorCodeTargetConditionNotMet ... + StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" + // StorageErrorCodeUnauthorizedBlobOverwrite ... + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" + // StorageErrorCodeUnsupportedHeader ... + StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" + // StorageErrorCodeUnsupportedHTTPVerb ... + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" + // StorageErrorCodeUnsupportedQueryParameter ... + StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" + // StorageErrorCodeUnsupportedXMLNode ... + StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. 
+func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { + return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, 
+	StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} +} + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // Start - the date-time the policy is active + Start time.Time `xml:"Start"` + // Expiry - the date-time the policy expires + Expiry time.Time `xml:"Expiry"` + // Permission - the permissions for the acl policy + Permission string `xml:"Permission"` +} + +// MarshalXML implements the xml.Marshaler interface for AccessPolicy. +func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) + return e.EncodeElement(*ap2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. +func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(ap)) + return d.DecodeElement(ap2, &start) +} + +// AppendBlobAppendBlockResponse ... +type AppendBlobAppendBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { + return ababr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababr AppendBlobAppendBlockResponse) StatusCode() int { + return ababr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ababr AppendBlobAppendBlockResponse) Status() string { + return ababr.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { + return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { + s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { + s := ababr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (ababr AppendBlobAppendBlockResponse) Date() time.Time { + s := ababr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code.
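The unsafe.Pointer casts in MarshalXML/UnmarshalXML swap AccessPolicy for an internal twin type (declared elsewhere in this file) whose fields carry the wire-format time encoding; callers simply go through encoding/xml. A short round-trip sketch under the same import-path assumption as above; the permission string "rl" is only an example:

package main

import (
	"encoding/xml"
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	ap := azblob.AccessPolicy{
		Start:      time.Now().UTC(),
		Expiry:     time.Now().UTC().Add(time.Hour),
		Permission: "rl", // example permissions; the service defines the letters
	}

	b, err := xml.Marshal(ap) // goes through AccessPolicy.MarshalXML
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var back azblob.AccessPolicy
	if err := xml.Unmarshal(b, &back); err != nil { // AccessPolicy.UnmarshalXML
		panic(err)
	}
	fmt.Println(back.Permission)
}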
+func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { + return ababr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ababr AppendBlobAppendBlockResponse) ETag() ETag { + return ETag(ababr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { + s := ababr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababr AppendBlobAppendBlockResponse) RequestID() string { + return ababr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababr AppendBlobAppendBlockResponse) Version() string { + return ababr.rawResponse.Header.Get("x-ms-version") +} + +// AppendBlobCreateResponse ... +type AppendBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (abcr AppendBlobCreateResponse) Response() *http.Response { + return abcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (abcr AppendBlobCreateResponse) StatusCode() int { + return abcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (abcr AppendBlobCreateResponse) Status() string { + return abcr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (abcr AppendBlobCreateResponse) ContentMD5() []byte { + s := abcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (abcr AppendBlobCreateResponse) Date() time.Time { + s := abcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (abcr AppendBlobCreateResponse) ErrorCode() string { + return abcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (abcr AppendBlobCreateResponse) ETag() ETag { + return ETag(abcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (abcr AppendBlobCreateResponse) IsServerEncrypted() string { + return abcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (abcr AppendBlobCreateResponse) LastModified() time.Time { + s := abcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (abcr AppendBlobCreateResponse) RequestID() string { + return abcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (abcr AppendBlobCreateResponse) Version() string { + return abcr.rawResponse.Header.Get("x-ms-version") +} + +// BlobAbortCopyFromURLResponse ... +type BlobAbortCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
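Every response type in this file exposes ErrorCode() as the raw x-ms-error-code header string; converting it to StorageErrorCodeType lets callers switch on the typed constants defined earlier. A sketch of one possible transient-failure check (the retry policy is hypothetical; the constants are the ones from this file):

package blobexample

import "github.com/Azure/azure-storage-blob-go/azblob"

// isTransient reports whether an x-ms-error-code value looks worth retrying.
// The classification here is an example policy, not service guidance.
func isTransient(code string) bool {
	switch azblob.StorageErrorCodeType(code) {
	case azblob.StorageErrorCodeServerBusy,
		azblob.StorageErrorCodeOperationTimedOut,
		azblob.StorageErrorCodeInternalError:
		return true
	}
	return false
}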
+func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { + return bacfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { + return bacfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bacfur BlobAbortCopyFromURLResponse) Status() string { + return bacfur.rawResponse.Status +} + +// Date returns the value for header Date. +func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { + s := bacfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { + return bacfur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bacfur BlobAbortCopyFromURLResponse) RequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bacfur BlobAbortCopyFromURLResponse) Version() string { + return bacfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobAcquireLeaseResponse ... +type BlobAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (balr BlobAcquireLeaseResponse) Response() *http.Response { + return balr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (balr BlobAcquireLeaseResponse) StatusCode() int { + return balr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (balr BlobAcquireLeaseResponse) Status() string { + return balr.rawResponse.Status +} + +// Date returns the value for header Date. +func (balr BlobAcquireLeaseResponse) Date() time.Time { + s := balr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (balr BlobAcquireLeaseResponse) ErrorCode() string { + return balr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (balr BlobAcquireLeaseResponse) ETag() ETag { + return ETag(balr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (balr BlobAcquireLeaseResponse) LastModified() time.Time { + s := balr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (balr BlobAcquireLeaseResponse) LeaseID() string { + return balr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (balr BlobAcquireLeaseResponse) RequestID() string { + return balr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (balr BlobAcquireLeaseResponse) Version() string { + return balr.rawResponse.Header.Get("x-ms-version") +} + +// BlobBreakLeaseResponse ... +type BlobBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (bblr BlobBreakLeaseResponse) Response() *http.Response { + return bblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bblr BlobBreakLeaseResponse) StatusCode() int { + return bblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bblr BlobBreakLeaseResponse) Status() string { + return bblr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bblr BlobBreakLeaseResponse) Date() time.Time { + s := bblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bblr BlobBreakLeaseResponse) ErrorCode() string { + return bblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bblr BlobBreakLeaseResponse) ETag() ETag { + return ETag(bblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bblr BlobBreakLeaseResponse) LastModified() time.Time { + s := bblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { + s := bblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (bblr BlobBreakLeaseResponse) RequestID() string { + return bblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bblr BlobBreakLeaseResponse) Version() string { + return bblr.rawResponse.Header.Get("x-ms-version") +} + +// BlobChangeLeaseResponse ... +type BlobChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bclr BlobChangeLeaseResponse) Response() *http.Response { + return bclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bclr BlobChangeLeaseResponse) StatusCode() int { + return bclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bclr BlobChangeLeaseResponse) Status() string { + return bclr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bclr BlobChangeLeaseResponse) Date() time.Time { + s := bclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bclr BlobChangeLeaseResponse) ErrorCode() string { + return bclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bclr BlobChangeLeaseResponse) ETag() ETag { + return ETag(bclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bclr BlobChangeLeaseResponse) LastModified() time.Time { + s := bclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. 
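The numeric header accessors such as LeaseTime() and BlobCommittedBlockCount() share a convention: -1 means the header was absent, while a value that fails to parse comes back as 0. Callers should test the sentinel rather than assume the header is present; a minimal sketch (resp stands for the result of a hypothetical break-lease call):

package blobexample

import "github.com/Azure/azure-storage-blob-go/azblob"

// leaseTimeRemaining returns the seconds left on a breaking lease and
// whether the x-ms-lease-time header was actually present.
func leaseTimeRemaining(resp *azblob.BlobBreakLeaseResponse) (int32, bool) {
	lt := resp.LeaseTime() // -1 signals a missing header, 0 a parse failure
	return lt, lt >= 0
}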
+func (bclr BlobChangeLeaseResponse) LeaseID() string { + return bclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (bclr BlobChangeLeaseResponse) RequestID() string { + return bclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bclr BlobChangeLeaseResponse) Version() string { + return bclr.rawResponse.Header.Get("x-ms-version") +} + +// BlobCreateSnapshotResponse ... +type BlobCreateSnapshotResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { + return bcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcsr BlobCreateSnapshotResponse) StatusCode() int { + return bcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcsr BlobCreateSnapshotResponse) Status() string { + return bcsr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bcsr BlobCreateSnapshotResponse) Date() time.Time { + s := bcsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { + return bcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcsr BlobCreateSnapshotResponse) ETag() ETag { + return ETag(bcsr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { + s := bcsr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bcsr BlobCreateSnapshotResponse) RequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Snapshot returns the value for header x-ms-snapshot. +func (bcsr BlobCreateSnapshotResponse) Snapshot() string { + return bcsr.rawResponse.Header.Get("x-ms-snapshot") +} + +// Version returns the value for header x-ms-version. +func (bcsr BlobCreateSnapshotResponse) Version() string { + return bcsr.rawResponse.Header.Get("x-ms-version") +} + +// BlobDeleteResponse ... +type BlobDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdr BlobDeleteResponse) Response() *http.Response { + return bdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdr BlobDeleteResponse) StatusCode() int { + return bdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdr BlobDeleteResponse) Status() string { + return bdr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bdr BlobDeleteResponse) Date() time.Time { + s := bdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (bdr BlobDeleteResponse) ErrorCode() string { + return bdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdr BlobDeleteResponse) RequestID() string { + return bdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bdr BlobDeleteResponse) Version() string { + return bdr.rawResponse.Header.Get("x-ms-version") +} + +// BlobFlatListSegment ... +type BlobFlatListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobItems []BlobItem `xml:"Blob"` +} + +// BlobGetAccountInfoResponse ... +type BlobGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bgair BlobGetAccountInfoResponse) Response() *http.Response { + return bgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgair BlobGetAccountInfoResponse) StatusCode() int { + return bgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgair BlobGetAccountInfoResponse) Status() string { + return bgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// Date returns the value for header Date. +func (bgair BlobGetAccountInfoResponse) Date() time.Time { + s := bgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgair BlobGetAccountInfoResponse) ErrorCode() string { + return bgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bgair BlobGetAccountInfoResponse) RequestID() string { + return bgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (bgair BlobGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(bgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (bgair BlobGetAccountInfoResponse) Version() string { + return bgair.rawResponse.Header.Get("x-ms-version") +} + +// BlobGetPropertiesResponse ... +type BlobGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (bgpr BlobGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range bgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (bgpr BlobGetPropertiesResponse) Response() *http.Response { + return bgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgpr BlobGetPropertiesResponse) StatusCode() int { + return bgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgpr BlobGetPropertiesResponse) Status() string { + return bgpr.rawResponse.Status +} + +// AcceptRanges returns the value for header Accept-Ranges. 
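NewMetadata() rebuilds user-defined metadata by matching the x-ms-meta- header prefix (mdPrefix and mdPrefixLen are package-level constants declared elsewhere in the package) and lower-casing the key remainder, since HTTP header names are case-insensitive. A short sketch of consuming it (props stands for the result of a hypothetical get-properties call):

package blobexample

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// printUserMetadata dumps the user-defined metadata attached to a blob.
func printUserMetadata(props *azblob.BlobGetPropertiesResponse) {
	for k, v := range props.NewMetadata() {
		fmt.Printf("%s=%s\n", k, v) // keys arrive lower-cased
	}
}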
+func (bgpr BlobGetPropertiesResponse) AcceptRanges() string { + return bgpr.rawResponse.Header.Get("Accept-Ranges") +} + +// AccessTier returns the value for header x-ms-access-tier. +func (bgpr BlobGetPropertiesResponse) AccessTier() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier") +} + +// AccessTierChangeTime returns the value for header x-ms-access-tier-change-time. +func (bgpr BlobGetPropertiesResponse) AccessTierChangeTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-access-tier-change-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// AccessTierInferred returns the value for header x-ms-access-tier-inferred. +func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred") +} + +// ArchiveStatus returns the value for header x-ms-archive-status. +func (bgpr BlobGetPropertiesResponse) ArchiveStatus() string { + return bgpr.rawResponse.Header.Get("x-ms-archive-status") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (bgpr BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { + return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (bgpr BlobGetPropertiesResponse) CacheControl() string { + return bgpr.rawResponse.Header.Get("Cache-Control") +} + +// ContentDisposition returns the value for header Content-Disposition. +func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { + return bgpr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (bgpr BlobGetPropertiesResponse) ContentEncoding() string { + return bgpr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (bgpr BlobGetPropertiesResponse) ContentLanguage() string { + return bgpr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (bgpr BlobGetPropertiesResponse) ContentLength() int64 { + s := bgpr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte { + s := bgpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentType returns the value for header Content-Type. 
+func (bgpr BlobGetPropertiesResponse) ContentType() string { + return bgpr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (bgpr BlobGetPropertiesResponse) CopyCompletionTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (bgpr BlobGetPropertiesResponse) CopyID() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (bgpr BlobGetPropertiesResponse) CopyProgress() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (bgpr BlobGetPropertiesResponse) CopySource() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bgpr BlobGetPropertiesResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bgpr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// CreationTime returns the value for header x-ms-creation-time. +func (bgpr BlobGetPropertiesResponse) CreationTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-creation-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// Date returns the value for header Date. +func (bgpr BlobGetPropertiesResponse) Date() time.Time { + s := bgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot. +func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgpr BlobGetPropertiesResponse) ErrorCode() string { + return bgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bgpr BlobGetPropertiesResponse) ETag() ETag { + return ETag(bgpr.rawResponse.Header.Get("ETag")) +} + +// IsIncrementalCopy returns the value for header x-ms-incremental-copy. +func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { + return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { + return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bgpr BlobGetPropertiesResponse) LastModified() time.Time { + s := bgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. 
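All the time-valued accessors here (Date, LastModified, CreationTime, CopyCompletionTime, and so on) parse the RFC 1123 header the same way and return the zero time.Time both when the header is missing and when parsing fails, so time.Time.IsZero is the idiomatic presence check. A minimal sketch:

package blobexample

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// ageOf returns how long ago the blob was last modified, and false when the
// Last-Modified header was absent or unparsable.
func ageOf(props *azblob.BlobGetPropertiesResponse) (time.Duration, bool) {
	lm := props.LastModified()
	if lm.IsZero() {
		return 0, false
	}
	return time.Since(lm), true
}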
+func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (bgpr BlobGetPropertiesResponse) RequestID() string { + return bgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bgpr BlobGetPropertiesResponse) Version() string { + return bgpr.rawResponse.Header.Get("x-ms-version") +} + +// BlobHierarchyListSegment ... +type BlobHierarchyListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobPrefixes []BlobPrefix `xml:"BlobPrefix"` + BlobItems []BlobItem `xml:"Blob"` +} + +// BlobItem - An Azure Storage blob +type BlobItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + Deleted bool `xml:"Deleted"` + Snapshot string `xml:"Snapshot"` + Properties BlobProperties `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` +} + +// BlobPrefix ... +type BlobPrefix struct { + Name string `xml:"Name"` +} + +// BlobProperties - Properties of a blob +type BlobProperties struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"Properties"` + CreationTime *time.Time `xml:"Creation-Time"` + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // ContentLength - Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + // BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone' + BlobType BlobType `xml:"BlobType"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + // CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone' + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *time.Time `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone' + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + // ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone' + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` +} + +// MarshalXML implements the xml.Marshaler interface for BlobProperties. +func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + bp2 := (*blobProperties)(unsafe.Pointer(&bp)) + return e.EncodeElement(*bp2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties. +func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + bp2 := (*blobProperties)(unsafe.Pointer(bp)) + return d.DecodeElement(bp2, &start) +} + +// BlobReleaseLeaseResponse ... +type BlobReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobReleaseLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobReleaseLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. 
"200 OK". +func (brlr BlobReleaseLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (brlr BlobReleaseLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobReleaseLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobReleaseLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobReleaseLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (brlr BlobReleaseLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brlr BlobReleaseLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobRenewLeaseResponse ... +type BlobRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobRenewLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobRenewLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brlr BlobRenewLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (brlr BlobRenewLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobRenewLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobRenewLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobRenewLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (brlr BlobRenewLeaseResponse) LeaseID() string { + return brlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (brlr BlobRenewLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brlr BlobRenewLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetHTTPHeadersResponse ... 
+type BlobSetHTTPHeadersResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response { + return bshhr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int { + return bshhr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bshhr BlobSetHTTPHeadersResponse) Status() string { + return bshhr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { + s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { + s := bshhr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string { + return bshhr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag { + return ETag(bshhr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time { + s := bshhr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bshhr BlobSetHTTPHeadersResponse) RequestID() string { + return bshhr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bshhr BlobSetHTTPHeadersResponse) Version() string { + return bshhr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetMetadataResponse ... +type BlobSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bsmr BlobSetMetadataResponse) Response() *http.Response { + return bsmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bsmr BlobSetMetadataResponse) StatusCode() int { + return bsmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bsmr BlobSetMetadataResponse) Status() string { + return bsmr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bsmr BlobSetMetadataResponse) Date() time.Time { + s := bsmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bsmr BlobSetMetadataResponse) ErrorCode() string { + return bsmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bsmr BlobSetMetadataResponse) ETag() ETag { + return ETag(bsmr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. 
+func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string { + return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bsmr BlobSetMetadataResponse) LastModified() time.Time { + s := bsmr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bsmr BlobSetMetadataResponse) RequestID() string { + return bsmr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bsmr BlobSetMetadataResponse) Version() string { + return bsmr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetTierResponse ... +type BlobSetTierResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bstr BlobSetTierResponse) Response() *http.Response { + return bstr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bstr BlobSetTierResponse) StatusCode() int { + return bstr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bstr BlobSetTierResponse) Status() string { + return bstr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bstr BlobSetTierResponse) ErrorCode() string { + return bstr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bstr BlobSetTierResponse) RequestID() string { + return bstr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bstr BlobSetTierResponse) Version() string { + return bstr.rawResponse.Header.Get("x-ms-version") +} + +// BlobStartCopyFromURLResponse ... +type BlobStartCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response { + return bscfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bscfur BlobStartCopyFromURLResponse) StatusCode() int { + return bscfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bscfur BlobStartCopyFromURLResponse) Status() string { + return bscfur.rawResponse.Status +} + +// CopyID returns the value for header x-ms-copy-id. +func (bscfur BlobStartCopyFromURLResponse) CopyID() string { + return bscfur.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (bscfur BlobStartCopyFromURLResponse) Date() time.Time { + s := bscfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string { + return bscfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
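BlobStartCopyFromURLResponse only reports the copy's initial state, so the usual pattern is to remember CopyID() and poll the blob's properties until CopyStatus() leaves the pending state. A rough sketch; azblob.BlobURL and the GetProperties signature are assumptions about the rest of the package, which this diff does not show:

package blobexample

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// waitForCopy polls blob properties until the copy identified by copyID
// leaves the pending state. Treat the GetProperties call as illustrative.
func waitForCopy(ctx context.Context, blobURL azblob.BlobURL, copyID string) error {
	for {
		props, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
		if err != nil {
			return err
		}
		if props.CopyID() != copyID {
			return fmt.Errorf("copy %s superseded by %s", copyID, props.CopyID())
		}
		switch props.CopyStatus() {
		case azblob.CopyStatusSuccess:
			return nil
		case azblob.CopyStatusPending:
			time.Sleep(time.Second)
		default:
			return fmt.Errorf("copy ended in state %q: %s",
				props.CopyStatus(), props.CopyStatusDescription())
		}
	}
}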
+func (bscfur BlobStartCopyFromURLResponse) ETag() ETag { + return ETag(bscfur.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time { + s := bscfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bscfur BlobStartCopyFromURLResponse) RequestID() string { + return bscfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bscfur BlobStartCopyFromURLResponse) Version() string { + return bscfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobUndeleteResponse ... +type BlobUndeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bur BlobUndeleteResponse) Response() *http.Response { + return bur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bur BlobUndeleteResponse) StatusCode() int { + return bur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bur BlobUndeleteResponse) Status() string { + return bur.rawResponse.Status +} + +// Date returns the value for header Date. +func (bur BlobUndeleteResponse) Date() time.Time { + s := bur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bur BlobUndeleteResponse) ErrorCode() string { + return bur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bur BlobUndeleteResponse) RequestID() string { + return bur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bur BlobUndeleteResponse) Version() string { + return bur.rawResponse.Header.Get("x-ms-version") +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // Name - The base64 encoded block ID. + Name string `xml:"Name"` + // Size - The block size in bytes. + Size int32 `xml:"Size"` +} + +// BlockBlobCommitBlockListResponse ... +type BlockBlobCommitBlockListResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response { + return bbcblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int { + return bbcblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbcblr BlockBlobCommitBlockListResponse) Status() string { + return bbcblr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { + s := bbcblr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. 
+func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { + s := bbcblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { + return bbcblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag { + return ETag(bbcblr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time { + s := bbcblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbcblr BlockBlobCommitBlockListResponse) Version() string { + return bbcblr.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobStageBlockFromURLResponse ... +type BlockBlobStageBlockFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response { + return bbsbfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int { + return bbsbfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { + return bbsbfur.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { + s := bbsbfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { + s := bbsbfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { + return bbsbfur.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { + return bbsbfur.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobStageBlockResponse ... +type BlockBlobStageBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response { + return bbsbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbr BlockBlobStageBlockResponse) StatusCode() int { + return bbsbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbr BlockBlobStageBlockResponse) Status() string { + return bbsbr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { + s := bbsbr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { + s := bbsbr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { + return bbsbr.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbr BlockBlobStageBlockResponse) RequestID() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbsbr BlockBlobStageBlockResponse) Version() string { + return bbsbr.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobUploadResponse ... +type BlockBlobUploadResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbur BlockBlobUploadResponse) Response() *http.Response { + return bbur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbur BlockBlobUploadResponse) StatusCode() int { + return bbur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbur BlockBlobUploadResponse) Status() string { + return bbur.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbur BlockBlobUploadResponse) ContentMD5() []byte { + s := bbur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbur BlockBlobUploadResponse) Date() time.Time { + s := bbur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbur BlockBlobUploadResponse) ErrorCode() string { + return bbur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
+func (bbur BlockBlobUploadResponse) ETag() ETag {
+	return ETag(bbur.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbur BlockBlobUploadResponse) IsServerEncrypted() string {
+	return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bbur BlockBlobUploadResponse) LastModified() time.Time {
+	s := bbur.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbur BlockBlobUploadResponse) RequestID() string {
+	return bbur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbur BlockBlobUploadResponse) Version() string {
+	return bbur.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlockList ...
+type BlockList struct {
+	rawResponse       *http.Response
+	CommittedBlocks   []Block `xml:"CommittedBlocks>Block"`
+	UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"`
+}
+
+// Response returns the raw HTTP response object.
+func (bl BlockList) Response() *http.Response {
+	return bl.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bl BlockList) StatusCode() int {
+	return bl.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bl BlockList) Status() string {
+	return bl.rawResponse.Status
+}
+
+// BlobContentLength returns the value for header x-ms-blob-content-length.
+func (bl BlockList) BlobContentLength() int64 {
+	s := bl.rawResponse.Header.Get("x-ms-blob-content-length")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// ContentType returns the value for header Content-Type.
+func (bl BlockList) ContentType() string {
+	return bl.rawResponse.Header.Get("Content-Type")
+}
+
+// Date returns the value for header Date.
+func (bl BlockList) Date() time.Time {
+	s := bl.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bl BlockList) ErrorCode() string {
+	return bl.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bl BlockList) ETag() ETag {
+	return ETag(bl.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bl BlockList) LastModified() time.Time {
+	s := bl.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bl BlockList) RequestID() string {
+	return bl.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bl BlockList) Version() string {
+	return bl.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlockLookupList ...
+type BlockLookupList struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName     xml.Name `xml:"BlockList"`
+	Committed   []string `xml:"Committed"`
+	Uncommitted []string `xml:"Uncommitted"`
+	Latest      []string `xml:"Latest"`
+}
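+
+// NOTE (editor's sketch, not generated code): BlockLookupList is the XML
+// request body for the Put Block List operation. Block IDs must be
+// base64-encoded before transmission, and the Latest list selects whichever
+// copy of each block (committed or uncommitted) was uploaded most recently.
+// The helper below is illustrative only and marshals a two-entry list.
+func exampleBlockLookupList() ([]byte, error) {
+	ids := []string{
+		base64.StdEncoding.EncodeToString([]byte("block-00000")),
+		base64.StdEncoding.EncodeToString([]byte("block-00001")),
+	}
+	// xml.Marshal emits <BlockList><Latest>...</Latest>...</BlockList>,
+	// matching the schema declared by the struct tags above.
+	return xml.Marshal(BlockLookupList{Latest: ids})
+}
+
+// ClearRange ...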
+type ClearRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// ContainerAcquireLeaseResponse ... +type ContainerAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (calr ContainerAcquireLeaseResponse) Response() *http.Response { + return calr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (calr ContainerAcquireLeaseResponse) StatusCode() int { + return calr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (calr ContainerAcquireLeaseResponse) Status() string { + return calr.rawResponse.Status +} + +// Date returns the value for header Date. +func (calr ContainerAcquireLeaseResponse) Date() time.Time { + s := calr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (calr ContainerAcquireLeaseResponse) ErrorCode() string { + return calr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (calr ContainerAcquireLeaseResponse) ETag() ETag { + return ETag(calr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (calr ContainerAcquireLeaseResponse) LastModified() time.Time { + s := calr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (calr ContainerAcquireLeaseResponse) LeaseID() string { + return calr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (calr ContainerAcquireLeaseResponse) RequestID() string { + return calr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (calr ContainerAcquireLeaseResponse) Version() string { + return calr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerBreakLeaseResponse ... +type ContainerBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cblr ContainerBreakLeaseResponse) Response() *http.Response { + return cblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cblr ContainerBreakLeaseResponse) StatusCode() int { + return cblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cblr ContainerBreakLeaseResponse) Status() string { + return cblr.rawResponse.Status +} + +// Date returns the value for header Date. +func (cblr ContainerBreakLeaseResponse) Date() time.Time { + s := cblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cblr ContainerBreakLeaseResponse) ErrorCode() string { + return cblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cblr ContainerBreakLeaseResponse) ETag() ETag { + return ETag(cblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. 
+func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { + s := cblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { + s := cblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (cblr ContainerBreakLeaseResponse) RequestID() string { + return cblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cblr ContainerBreakLeaseResponse) Version() string { + return cblr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerChangeLeaseResponse ... +type ContainerChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cclr ContainerChangeLeaseResponse) Response() *http.Response { + return cclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cclr ContainerChangeLeaseResponse) StatusCode() int { + return cclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cclr ContainerChangeLeaseResponse) Status() string { + return cclr.rawResponse.Status +} + +// Date returns the value for header Date. +func (cclr ContainerChangeLeaseResponse) Date() time.Time { + s := cclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cclr ContainerChangeLeaseResponse) ErrorCode() string { + return cclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cclr ContainerChangeLeaseResponse) ETag() ETag { + return ETag(cclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cclr ContainerChangeLeaseResponse) LastModified() time.Time { + s := cclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (cclr ContainerChangeLeaseResponse) LeaseID() string { + return cclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (cclr ContainerChangeLeaseResponse) RequestID() string { + return cclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cclr ContainerChangeLeaseResponse) Version() string { + return cclr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerCreateResponse ... +type ContainerCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ccr ContainerCreateResponse) Response() *http.Response { + return ccr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ccr ContainerCreateResponse) StatusCode() int { + return ccr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (ccr ContainerCreateResponse) Status() string { + return ccr.rawResponse.Status +} + +// Date returns the value for header Date. +func (ccr ContainerCreateResponse) Date() time.Time { + s := ccr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ccr ContainerCreateResponse) ErrorCode() string { + return ccr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ccr ContainerCreateResponse) ETag() ETag { + return ETag(ccr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ccr ContainerCreateResponse) LastModified() time.Time { + s := ccr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ccr ContainerCreateResponse) RequestID() string { + return ccr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ccr ContainerCreateResponse) Version() string { + return ccr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerDeleteResponse ... +type ContainerDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cdr ContainerDeleteResponse) Response() *http.Response { + return cdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cdr ContainerDeleteResponse) StatusCode() int { + return cdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cdr ContainerDeleteResponse) Status() string { + return cdr.rawResponse.Status +} + +// Date returns the value for header Date. +func (cdr ContainerDeleteResponse) Date() time.Time { + s := cdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cdr ContainerDeleteResponse) ErrorCode() string { + return cdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cdr ContainerDeleteResponse) RequestID() string { + return cdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cdr ContainerDeleteResponse) Version() string { + return cdr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetAccountInfoResponse ... +type ContainerGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cgair ContainerGetAccountInfoResponse) Response() *http.Response { + return cgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgair ContainerGetAccountInfoResponse) StatusCode() int { + return cgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgair ContainerGetAccountInfoResponse) Status() string { + return cgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. 
+func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// Date returns the value for header Date. +func (cgair ContainerGetAccountInfoResponse) Date() time.Time { + s := cgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgair ContainerGetAccountInfoResponse) ErrorCode() string { + return cgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cgair ContainerGetAccountInfoResponse) RequestID() string { + return cgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (cgair ContainerGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(cgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (cgair ContainerGetAccountInfoResponse) Version() string { + return cgair.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetPropertiesResponse ... +type ContainerGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (cgpr ContainerGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range cgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (cgpr ContainerGetPropertiesResponse) Response() *http.Response { + return cgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgpr ContainerGetPropertiesResponse) StatusCode() int { + return cgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgpr ContainerGetPropertiesResponse) Status() string { + return cgpr.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { + return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// Date returns the value for header Date. +func (cgpr ContainerGetPropertiesResponse) Date() time.Time { + s := cgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { + return cgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cgpr ContainerGetPropertiesResponse) ETag() ETag { + return ETag(cgpr.rawResponse.Header.Get("ETag")) +} + +// HasImmutabilityPolicy returns the value for header x-ms-has-immutability-policy. +func (cgpr ContainerGetPropertiesResponse) HasImmutabilityPolicy() string { + return cgpr.rawResponse.Header.Get("x-ms-has-immutability-policy") +} + +// HasLegalHold returns the value for header x-ms-has-legal-hold. 
+func (cgpr ContainerGetPropertiesResponse) HasLegalHold() string { + return cgpr.rawResponse.Header.Get("x-ms-has-legal-hold") +} + +// LastModified returns the value for header Last-Modified. +func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { + s := cgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (cgpr ContainerGetPropertiesResponse) RequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cgpr ContainerGetPropertiesResponse) Version() string { + return cgpr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Container"` + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` +} + +// MarshalXML implements the xml.Marshaler interface for ContainerProperties. +func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + cp2 := (*containerProperties)(unsafe.Pointer(&cp)) + return e.EncodeElement(*cp2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties. +func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + cp2 := (*containerProperties)(unsafe.Pointer(cp)) + return d.DecodeElement(cp2, &start) +} + +// ContainerReleaseLeaseResponse ... +type ContainerReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerReleaseLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerReleaseLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (crlr ContainerReleaseLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerReleaseLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerReleaseLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerReleaseLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerReleaseLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenewLeaseResponse ... +type ContainerRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crlr ContainerRenewLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerRenewLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerRenewLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (crlr ContainerRenewLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerRenewLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerRenewLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerRenewLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (crlr ContainerRenewLeaseResponse) LeaseID() string { + return crlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. 
+func (crlr ContainerRenewLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerRenewLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetAccessPolicyResponse ... +type ContainerSetAccessPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response { + return csapr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csapr ContainerSetAccessPolicyResponse) StatusCode() int { + return csapr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csapr ContainerSetAccessPolicyResponse) Status() string { + return csapr.rawResponse.Status +} + +// Date returns the value for header Date. +func (csapr ContainerSetAccessPolicyResponse) Date() time.Time { + s := csapr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string { + return csapr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csapr ContainerSetAccessPolicyResponse) ETag() ETag { + return ETag(csapr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time { + s := csapr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csapr ContainerSetAccessPolicyResponse) RequestID() string { + return csapr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csapr ContainerSetAccessPolicyResponse) Version() string { + return csapr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetMetadataResponse ... +type ContainerSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csmr ContainerSetMetadataResponse) Response() *http.Response { + return csmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csmr ContainerSetMetadataResponse) StatusCode() int { + return csmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csmr ContainerSetMetadataResponse) Status() string { + return csmr.rawResponse.Status +} + +// Date returns the value for header Date. +func (csmr ContainerSetMetadataResponse) Date() time.Time { + s := csmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csmr ContainerSetMetadataResponse) ErrorCode() string { + return csmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
+func (csmr ContainerSetMetadataResponse) ETag() ETag {
+	return ETag(csmr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
+	s := csmr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (csmr ContainerSetMetadataResponse) RequestID() string {
+	return csmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (csmr ContainerSetMetadataResponse) Version() string {
+	return csmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
+// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
+// (the origin domain) to call APIs in another domain.
+type CorsRule struct {
+	// AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
+	AllowedOrigins string `xml:"AllowedOrigins"`
+	// AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+	AllowedMethods string `xml:"AllowedMethods"`
+	// AllowedHeaders - The request headers that the origin domain may specify on the CORS request.
+	AllowedHeaders string `xml:"AllowedHeaders"`
+	// ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer.
+	ExposedHeaders string `xml:"ExposedHeaders"`
+	// MaxAgeInSeconds - The maximum amount of time that a browser should cache the preflight OPTIONS request.
+	MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
+}
+
+// downloadResponse - Wraps the response from the blobClient.Download method.
+type downloadResponse struct {
+	rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (dr downloadResponse) NewMetadata() Metadata {
+	md := Metadata{}
+	for k, v := range dr.rawResponse.Header {
+		if len(k) > mdPrefixLen {
+			if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+				md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+			}
+		}
+	}
+	return md
+}
+
+// Response returns the raw HTTP response object.
+func (dr downloadResponse) Response() *http.Response {
+	return dr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dr downloadResponse) StatusCode() int {
+	return dr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dr downloadResponse) Status() string {
+	return dr.rawResponse.Status
+}
+
+// Body returns the raw HTTP response object's Body.
+func (dr downloadResponse) Body() io.ReadCloser {
+	return dr.rawResponse.Body
+}
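+
+// NOTE (editor's sketch, not generated code): a minimal consumer of
+// downloadResponse showing how the accessors above are meant to be used.
+// Body must always be closed, and ContentLength (defined below) returns -1
+// when the Content-Length header is absent, so only a non-negative value is
+// checked against the bytes actually read.
+func drainDownloadBody(dr downloadResponse) (int64, error) {
+	body := dr.Body()
+	defer body.Close()
+	var total int64
+	buf := make([]byte, 32*1024)
+	for {
+		n, err := body.Read(buf)
+		total += int64(n)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return total, err
+		}
+	}
+	if want := dr.ContentLength(); want >= 0 && want != total {
+		return total, io.ErrUnexpectedEOF // body shorter than the header promised
+	}
+	return total, nil
+}
+
+// AcceptRanges returns the value for header Accept-Ranges.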
+func (dr downloadResponse) AcceptRanges() string { + return dr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (dr downloadResponse) BlobCommittedBlockCount() int32 { + s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (dr downloadResponse) BlobContentMD5() []byte { + s := dr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (dr downloadResponse) BlobSequenceNumber() int64 { + s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (dr downloadResponse) BlobType() BlobType { + return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (dr downloadResponse) CacheControl() string { + return dr.rawResponse.Header.Get("Cache-Control") +} + +// ContentDisposition returns the value for header Content-Disposition. +func (dr downloadResponse) ContentDisposition() string { + return dr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (dr downloadResponse) ContentEncoding() string { + return dr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (dr downloadResponse) ContentLanguage() string { + return dr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (dr downloadResponse) ContentLength() int64 { + s := dr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (dr downloadResponse) ContentMD5() []byte { + s := dr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (dr downloadResponse) ContentRange() string { + return dr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (dr downloadResponse) ContentType() string { + return dr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (dr downloadResponse) CopyCompletionTime() time.Time { + s := dr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (dr downloadResponse) CopyID() string { + return dr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. 
+func (dr downloadResponse) CopyProgress() string { + return dr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (dr downloadResponse) CopySource() string { + return dr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (dr downloadResponse) CopyStatus() CopyStatusType { + return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (dr downloadResponse) CopyStatusDescription() string { + return dr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (dr downloadResponse) Date() time.Time { + s := dr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (dr downloadResponse) ErrorCode() string { + return dr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (dr downloadResponse) ETag() ETag { + return ETag(dr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (dr downloadResponse) IsServerEncrypted() string { + return dr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (dr downloadResponse) LastModified() time.Time { + s := dr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (dr downloadResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (dr downloadResponse) LeaseState() LeaseStateType { + return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (dr downloadResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (dr downloadResponse) RequestID() string { + return dr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (dr downloadResponse) Version() string { + return dr.rawResponse.Header.Get("x-ms-version") +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' + Status GeoReplicationStatusType `xml:"Status"` + // LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. + LastSyncTime time.Time `xml:"LastSyncTime"` +} + +// MarshalXML implements the xml.Marshaler interface for GeoReplication. 
+func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(&gr)) + return e.EncodeElement(*gr2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. +func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(gr)) + return d.DecodeElement(gr2, &start) +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + Delimiter string `xml:"Delimiter"` + Segment BlobFlatListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response { + return lbfsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int { + return lbfsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbfsr ListBlobsFlatSegmentResponse) Status() string { + return lbfsr.rawResponse.Status +} + +// ContentType returns the value for header Content-Type. +func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { + return lbfsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time { + s := lbfsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string { + return lbfsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbfsr ListBlobsFlatSegmentResponse) Version() string { + return lbfsr.rawResponse.Header.Get("x-ms-version") +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + Delimiter string `xml:"Delimiter"` + Segment BlobHierarchyListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response { + return lbhsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int { + return lbhsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { + return lbhsr.rawResponse.Status +} + +// ContentType returns the value for header Content-Type. +func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { + return lbhsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time { + s := lbhsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string { + return lbhsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbhsr ListBlobsHierarchySegmentResponse) Version() string { + return lbhsr.rawResponse.Header.Get("x-ms-version") +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Prefix string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + ContainerItems []ContainerItem `xml:"Containers>Container"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lcsr ListContainersSegmentResponse) Response() *http.Response { + return lcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lcsr ListContainersSegmentResponse) StatusCode() int { + return lcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lcsr ListContainersSegmentResponse) Status() string { + return lcsr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lcsr ListContainersSegmentResponse) ErrorCode() string { + return lcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lcsr ListContainersSegmentResponse) RequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lcsr ListContainersSegmentResponse) Version() string { + return lcsr.rawResponse.Header.Get("x-ms-version") +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // Version - The version of Storage Analytics to configure. + Version string `xml:"Version"` + // Delete - Indicates whether all delete requests should be logged. + Delete bool `xml:"Delete"` + // Read - Indicates whether all read requests should be logged. + Read bool `xml:"Read"` + // Write - Indicates whether all write requests should be logged. 
+ Write bool `xml:"Write"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // Version - The version of Storage Analytics to configure. + Version *string `xml:"Version"` + // Enabled - Indicates whether metrics are enabled for the Blob service. + Enabled bool `xml:"Enabled"` + // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// PageBlobClearPagesResponse ... +type PageBlobClearPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcpr PageBlobClearPagesResponse) Response() *http.Response { + return pbcpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcpr PageBlobClearPagesResponse) StatusCode() int { + return pbcpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcpr PageBlobClearPagesResponse) Status() string { + return pbcpr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { + s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { + s := pbcpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcpr PageBlobClearPagesResponse) Date() time.Time { + s := pbcpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcpr PageBlobClearPagesResponse) ErrorCode() string { + return pbcpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcpr PageBlobClearPagesResponse) ETag() ETag { + return ETag(pbcpr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time { + s := pbcpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcpr PageBlobClearPagesResponse) RequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcpr PageBlobClearPagesResponse) Version() string { + return pbcpr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCopyIncrementalResponse ... +type PageBlobCopyIncrementalResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response { + return pbcir.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int { + return pbcir.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcir PageBlobCopyIncrementalResponse) Status() string { + return pbcir.rawResponse.Status +} + +// CopyID returns the value for header x-ms-copy-id. +func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { + return pbcir.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType { + return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time { + s := pbcir.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string { + return pbcir.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag { + return ETag(pbcir.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time { + s := pbcir.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcir PageBlobCopyIncrementalResponse) RequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcir PageBlobCopyIncrementalResponse) Version() string { + return pbcir.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCreateResponse ... +type PageBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcr PageBlobCreateResponse) Response() *http.Response { + return pbcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcr PageBlobCreateResponse) StatusCode() int { + return pbcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcr PageBlobCreateResponse) Status() string { + return pbcr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcr PageBlobCreateResponse) ContentMD5() []byte { + s := pbcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcr PageBlobCreateResponse) Date() time.Time { + s := pbcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcr PageBlobCreateResponse) ErrorCode() string { + return pbcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
+func (pbcr PageBlobCreateResponse) ETag() ETag { + return ETag(pbcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbcr PageBlobCreateResponse) IsServerEncrypted() string { + return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbcr PageBlobCreateResponse) LastModified() time.Time { + s := pbcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcr PageBlobCreateResponse) RequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcr PageBlobCreateResponse) Version() string { + return pbcr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobResizeResponse ... +type PageBlobResizeResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbrr PageBlobResizeResponse) Response() *http.Response { + return pbrr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbrr PageBlobResizeResponse) StatusCode() int { + return pbrr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbrr PageBlobResizeResponse) Status() string { + return pbrr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { + s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (pbrr PageBlobResizeResponse) Date() time.Time { + s := pbrr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbrr PageBlobResizeResponse) ErrorCode() string { + return pbrr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbrr PageBlobResizeResponse) ETag() ETag { + return ETag(pbrr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbrr PageBlobResizeResponse) LastModified() time.Time { + s := pbrr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbrr PageBlobResizeResponse) RequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbrr PageBlobResizeResponse) Version() string { + return pbrr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUpdateSequenceNumberResponse ... +type PageBlobUpdateSequenceNumberResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response { + return pbusnr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int { + return pbusnr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string { + return pbusnr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { + s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { + s := pbusnr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string { + return pbusnr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag { + return ETag(pbusnr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time { + s := pbusnr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string { + return pbusnr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUploadPagesResponse ... +type PageBlobUploadPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupr PageBlobUploadPagesResponse) Response() *http.Response { + return pbupr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupr PageBlobUploadPagesResponse) StatusCode() int { + return pbupr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupr PageBlobUploadPagesResponse) Status() string { + return pbupr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { + s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { + s := pbupr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. 
+func (pbupr PageBlobUploadPagesResponse) Date() time.Time { + s := pbupr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { + return pbupr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupr PageBlobUploadPagesResponse) ETag() ETag { + return ETag(pbupr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string { + return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time { + s := pbupr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupr PageBlobUploadPagesResponse) RequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupr PageBlobUploadPagesResponse) Version() string { + return pbupr.rawResponse.Header.Get("x-ms-version") +} + +// PageList - the list of pages +type PageList struct { + rawResponse *http.Response + PageRange []PageRange `xml:"PageRange"` + ClearRange []ClearRange `xml:"ClearRange"` +} + +// Response returns the raw HTTP response object. +func (pl PageList) Response() *http.Response { + return pl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pl PageList) StatusCode() int { + return pl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pl PageList) Status() string { + return pl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (pl PageList) BlobContentLength() int64 { + s := pl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (pl PageList) Date() time.Time { + s := pl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pl PageList) ErrorCode() string { + return pl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pl PageList) ETag() ETag { + return ETag(pl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pl PageList) LastModified() time.Time { + s := pl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pl PageList) RequestID() string { + return pl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (pl PageList) Version() string { + return pl.rawResponse.Header.Get("x-ms-version") +} + +// PageRange ... +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // Enabled - Indicates whether a retention policy is enabled for the storage service + Enabled bool `xml:"Enabled"` + // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted + Days *int32 `xml:"Days"` +} + +// ServiceGetAccountInfoResponse ... +type ServiceGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sgair ServiceGetAccountInfoResponse) Response() *http.Response { + return sgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sgair ServiceGetAccountInfoResponse) StatusCode() int { + return sgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sgair ServiceGetAccountInfoResponse) Status() string { + return sgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// Date returns the value for header Date. +func (sgair ServiceGetAccountInfoResponse) Date() time.Time { + s := sgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sgair ServiceGetAccountInfoResponse) ErrorCode() string { + return sgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sgair ServiceGetAccountInfoResponse) RequestID() string { + return sgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (sgair ServiceGetAccountInfoResponse) Version() string { + return sgair.rawResponse.Header.Get("x-ms-version") +} + +// ServiceSetPropertiesResponse ... +type ServiceSetPropertiesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sspr ServiceSetPropertiesResponse) Response() *http.Response { + return sspr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sspr ServiceSetPropertiesResponse) StatusCode() int { + return sspr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sspr ServiceSetPropertiesResponse) Status() string { + return sspr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sspr ServiceSetPropertiesResponse) ErrorCode() string { + return sspr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sspr ServiceSetPropertiesResponse) RequestID() string { + return sspr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (sspr ServiceSetPropertiesResponse) Version() string { + return sspr.rawResponse.Header.Get("x-ms-version") +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // ID - a unique id + ID string `xml:"Id"` + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +// SignedIdentifiers - Wraps the response from the containerClient.GetAccessPolicy method. +type SignedIdentifiers struct { + rawResponse *http.Response + Items []SignedIdentifier `xml:"SignedIdentifier"` +} + +// Response returns the raw HTTP response object. +func (si SignedIdentifiers) Response() *http.Response { + return si.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (si SignedIdentifiers) StatusCode() int { + return si.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (si SignedIdentifiers) Status() string { + return si.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { + return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// Date returns the value for header Date. +func (si SignedIdentifiers) Date() time.Time { + s := si.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (si SignedIdentifiers) ErrorCode() string { + return si.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (si SignedIdentifiers) ETag() ETag { + return ETag(si.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (si SignedIdentifiers) LastModified() time.Time { + s := si.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (si SignedIdentifiers) RequestID() string { + return si.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (si SignedIdentifiers) Version() string { + return si.rawResponse.Header.Get("x-ms-version") +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // Enabled - Indicates whether this account is hosting a static website + Enabled bool `xml:"Enabled"` + // IndexDocument - The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` + // ErrorDocument404Path - The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + rawResponse *http.Response + Logging *Logging `xml:"Logging"` + HourMetrics *Metrics `xml:"HourMetrics"` + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + // Cors - The set of CORS rules. + Cors []CorsRule `xml:"Cors>CorsRule"` + // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. 
Possible values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// Response returns the raw HTTP response object. +func (ssp StorageServiceProperties) Response() *http.Response { + return ssp.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ssp StorageServiceProperties) StatusCode() int { + return ssp.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ssp StorageServiceProperties) Status() string { + return ssp.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ssp StorageServiceProperties) ErrorCode() string { + return ssp.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (ssp StorageServiceProperties) RequestID() string { + return ssp.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ssp StorageServiceProperties) Version() string { + return ssp.rawResponse.Header.Get("x-ms-version") +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + rawResponse *http.Response + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// Response returns the raw HTTP response object. +func (sss StorageServiceStats) Response() *http.Response { + return sss.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sss StorageServiceStats) StatusCode() int { + return sss.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sss StorageServiceStats) Status() string { + return sss.rawResponse.Status +} + +// Date returns the value for header Date. +func (sss StorageServiceStats) Date() time.Time { + s := sss.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sss StorageServiceStats) ErrorCode() string { + return sss.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sss StorageServiceStats) RequestID() string { + return sss.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (sss StorageServiceStats) Version() string { + return sss.rawResponse.Header.Get("x-ms-version") +} + +func init() { + if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) + } + if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between BlobProperties and blobProperties")) + } + if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between ContainerProperties and containerProperties")) + } + if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between GeoReplication and geoReplication")) + } +} + +const ( + rfc3339Format = "2006-01-02T15:04:05.0000000Z07:00" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +// internal type used for marshalling time in RFC1123 format +type timeRFC1123 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. +func (t timeRFC1123) MarshalText() ([]byte, error) { + return []byte(t.Format(time.RFC1123)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. +func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(time.RFC1123, string(data)) + return +} + +// internal type used for marshalling time in RFC3339 format +type timeRFC3339 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. +func (t timeRFC3339) MarshalText() ([]byte, error) { + return []byte(t.Format(rfc3339Format)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(rfc3339Format, string(data)) + return +} + +// internal type used for marshalling base64 encoded strings +type base64Encoded struct { + b []byte +} + +// MarshalText implements the encoding.TextMarshaler interface for base64Encoded. +func (c base64Encoded) MarshalText() ([]byte, error) { + return []byte(base64.StdEncoding.EncodeToString(c.b)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded. +func (c *base64Encoded) UnmarshalText(data []byte) error { + b, err := base64.StdEncoding.DecodeString(string(data)) + if err != nil { + return err + } + c.b = b + return nil +} + +// internal type used for marshalling +type accessPolicy struct { + Start timeRFC3339 `xml:"Start"` + Expiry timeRFC3339 `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// internal type used for marshalling +type blobProperties struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"Properties"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 base64Encoded `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType BlobType `xml:"BlobType"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` +} + +// internal type used for marshalling +type containerProperties struct { + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` +} + +// internal type used for marshalling +type geoReplication struct { + Status GeoReplicationStatusType `xml:"Status"` + LastSyncTime timeRFC1123 `xml:"LastSyncTime"` +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go new file mode 100644 index 000000000..ad588eed7 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go @@ -0,0 +1,779 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// pageBlobClient is the client for the PageBlob methods of the Azblob service. +type pageBlobClient struct { + managementClient +} + +// newPageBlobClient creates an instance of the pageBlobClient client. +func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { + return pageBlobClient{newManagementClient(url, p)} +} + +// ClearPages the Clear Pages operation clears a set of pages from a page blob +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. 
leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobClearPagesResponse), err +} + +// clearPagesPreparer prepares the ClearPages request. 
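+//
+// Editor's note (illustrative sketch, not part of the generated code): the
+// prepared request is a PUT against the blob URL with comp=page and the
+// clear directive, roughly:
+//
+//	PUT <blob URL>?comp=page
+//	x-ms-range: <rangeParameter>
+//	x-ms-page-write: clear
+//	x-ms-version: <ServiceVersion>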
+func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "clear") + return req, nil +} + +// clearPagesResponder handles the response to the ClearPages request. +func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err +} + +// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are +// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or +// copied from as usual. This API is supported since REST version 2016-05-31. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCopyIncrementalResponse), err +} + +// copyIncrementalPreparer prepares the CopyIncremental request. +func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "incrementalcopy") + req.URL.RawQuery = params.Encode() + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-copy-source", copySource) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// copyIncrementalResponder handles the response to the CopyIncremental request. +func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err +} + +// Create the Create operation creates a new page blob. +// +// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page +// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. 
If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set +// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of +// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1 +// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
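+//
+// Editor's note (illustrative sketch, not part of the generated code; ctx, u
+// and p stand for an assumed context.Context, the blob's url.URL and a
+// pipeline.Pipeline): creating an empty 1 MiB page blob might look like:
+//
+//	client := newPageBlobClient(u, p)
+//	resp, err := client.Create(ctx, 0, 1024*1024, nil, nil, nil, nil, nil, nil,
+//		nil, nil, nil, nil, nil, nil, nil, nil)
+//	if err == nil {
+//		_ = resp.ETag() // e.g. inspect the new blob's ETag
+//	}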
+func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + if blobSequenceNumber != nil { + 
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "PageBlob") + return req, nil +} + +// createResponder handles the response to the Create request. +func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCreateResponse{rawResponse: resp.Response()}, err +} + +// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a +// page blob +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesPreparer prepares the GetPageRanges request. 
+func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesResponder handles the response to the GetPageRanges request. +func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetPageRangesDiff [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob +// that were changed between target blob and previous snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot +// parameter is a DateTime value that specifies that the response will contain only pages that were changed between +// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a +// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots +// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes +// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been +// modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to operate only on a blob if +// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs +// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesDiffPreparer prepares the GetPageRangesDiff request. +func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if prevsnapshot != nil && len(*prevsnapshot) > 0 { + params.Set("prevsnapshot", *prevsnapshot) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request. 
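+//
+// Editor's note (illustrative sketch, not part of the generated code): the
+// response body is XML that unmarshals into the PageList model, e.g.:
+//
+//	<?xml version="1.0" encoding="utf-8"?>
+//	<PageList>
+//	  <PageRange><Start>0</Start><End>511</End></PageRange>
+//	  <ClearRange><Start>512</Start><End>1023</End></ClearRange>
+//	</PageList>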
+func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// Resize resize the Blob +// +// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must +// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information, +// see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobResizeResponse), err +} + +// resizePreparer prepares the Resize request. 
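+//
+// Editor's note (illustrative sketch, not part of the generated code): the
+// prepared request is a PUT with comp=properties carrying the new size in a
+// header, roughly:
+//
+//	PUT <blob URL>?comp=properties
+//	x-ms-blob-content-length: <blobContentLength>
+//	x-ms-version: <ServiceVersion>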
+// resizePreparer prepares the Resize request. +func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// resizeResponder handles the response to the Resize request. +func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobResizeResponse{rawResponse: resp.Response()}, err +} +
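The Resize doc comment above requires x-ms-blob-content-length to land on a 512-byte page boundary. A small sketch of the rounding a caller would typically do first; roundUpTo512 is a hypothetical helper, not part of this package:

package main

import "fmt"

// roundUpTo512 is a hypothetical helper: it rounds a requested size up to the
// 512-byte page boundary that Resize requires.
func roundUpTo512(n int64) int64 {
	const pageSize = 512
	return (n + pageSize - 1) / pageSize * pageSize
}

func main() {
	fmt.Println(roundUpTo512(1000)) // 1024
	fmt.Println(roundUpTo512(1024)) // 1024 (already aligned)
}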
+// UpdateSequenceNumber updates the sequence number of the blob +// +// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property +// applies to page blobs only. This property indicates how the service should modify the blob's sequence number. +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to +// track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUpdateSequenceNumberResponse), err +} + +// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err +} + +// UploadPages the Upload Pages operation writes a range of pages to a page blob +// +// body is initial data; body will be closed upon successful return. Callers should ensure closure when receiving an +// error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the +// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds.
For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesResponse), err +} + +// uploadPagesPreparer prepares the UploadPages request. 
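The transactionalContentMD5 parameter is sent as a base64-encoded Content-MD5 header by the preparer below. A standalone sketch of producing that value for a page of data before calling UploadPages:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	page := make([]byte, 512) // page blobs are written in 512-byte-aligned ranges
	sum := md5.Sum(page)
	// Matches base64.StdEncoding.EncodeToString(transactionalContentMD5) in the preparer.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}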
+func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesResponder handles the response to the UploadPages request. +func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go new file mode 100644 index 000000000..8a023d0a0 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go @@ -0,0 +1,74 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io/ioutil" +) + +type responder func(resp pipeline.Response) (result pipeline.Response, err error) + +// ResponderPolicyFactory is a Factory capable of creating a responder pipeline. +type responderPolicyFactory struct { + responder responder +} + +// New creates a responder policy factory. 
+func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return responderPolicy{next: next, responder: arpf.responder} +} + +type responderPolicy struct { + next pipeline.Policy + responder responder +} + +// Do sends the request to the service and validates/deserializes the HTTP response. +func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + resp, err := arp.next.Do(ctx, request) + if err != nil { + return resp, err + } + return arp.responder(resp) +} + +// validateResponse checks an HTTP response's status code against a legal set of codes. +// If the response code is not legal, then validateResponse reads all of the response's body +// (containing error information) and returns a response error. +func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { + if resp == nil { + return NewResponseError(nil, nil, "nil response") + } + responseCode := resp.Response().StatusCode + for _, i := range successStatusCodes { + if i == responseCode { + return nil + } + } + // only close the body in the failure case. in the + // success case responders will close the body as required. + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return err + } + // the service code, description and details will be populated during unmarshalling + responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) + if len(b) > 0 { + if err = xml.Unmarshal(b, &responseError); err != nil { + return NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return responseError +} + +// removes any BOM from the byte slice +func removeBOM(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go new file mode 100644 index 000000000..3dcc75bb5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go @@ -0,0 +1,95 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "net" + "net/http" +) + +// if you want to provide custom error handling set this variable to your constructor function +var responseErrorFactory func(cause error, response *http.Response, description string) error + +// ResponseError identifies a responder-generated network or response parsing error. +type ResponseError interface { + // Error exposes the Error(), Temporary() and Timeout() methods. + net.Error // Includes the Go error interface + // Response returns the HTTP response. You may examine this but you should not modify it. + Response() *http.Response +} + +// NewResponseError creates an error object that implements the error interface. +func NewResponseError(cause error, response *http.Response, description string) error { + if responseErrorFactory != nil { + return responseErrorFactory(cause, response, description) + } + return &responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + } +} + +// responseError is the internal struct that implements the public ResponseError interface. 
+type responseError struct { + pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause + response *http.Response + description string +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *responseError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) + fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) + s := b.String() + return e.ErrorNode.Error(s) +} + +// Response implements the ResponseError interface's method to return the HTTP response. +func (e *responseError) Response() *http.Response { + return e.response +} + +// RFC7807 PROBLEM ------------------------------------------------------------------------------------ +// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. +/*type RFC7807Problem struct { + // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). + typeURI string // Should default to "about:blank" + // Optional: Short, human-readable summary (maybe localized). + title string + // Optional: HTTP status code generated by the origin server + status int + // Optional: Human-readable explanation for this problem occurrence. + // Should help client correct the problem. Clients should NOT parse this string. + detail string + // Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced). + instance string +} +// NewRFC7807Problem ... +func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { + return &RFC7807Problem{ + typeURI: typeURI, + status: status, + title: fmt.Sprintf(titleFormat, a...), + } +} +// Error returns the error information as a string. +func (e *RFC7807Problem) Error() string { + return e.title +} +// TypeURI ... +func (e *RFC7807Problem) TypeURI() string { + if e.typeURI == "" { + e.typeURI = "about:blank" + } + return e.typeURI +} +// Members ... +func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { + return e.status, e.title, e.detail, e.instance +}*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go new file mode 100644 index 000000000..c6840b43b --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go @@ -0,0 +1,388 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +// serviceClient is the client for the Service methods of the Azblob service. +type serviceClient struct { + managementClient +} +
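Errors returned by these responders can be type-asserted back to ResponseError to separate service rejections from transport failures. A self-contained sketch of that pattern; the interface is mirrored locally so the example compiles on its own (the real type also embeds net.Error):

package main

import (
	"fmt"
	"net/http"
)

// responseLike mirrors the azblob ResponseError surface used below.
type responseLike interface {
	error
	Response() *http.Response
}

func describe(err error) string {
	if re, ok := err.(responseLike); ok && re.Response() != nil {
		return fmt.Sprintf("service returned %s", re.Response().Status)
	}
	return fmt.Sprintf("transport error: %v", err)
}

func main() {
	fmt.Println(describe(fmt.Errorf("dial tcp: connection refused")))
}

+// newServiceClient creates an instance of the serviceClient client.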
+func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { + return serviceClient{newManagementClient(url, p)} +} + +// GetAccountInfo returns the sku name and account kind +func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. +func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceProperties), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. 
+func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceProperties{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the +// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getStatisticsPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceStats), err +} + +// getStatisticsPreparer prepares the GetStatistics request. +func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "stats") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getStatisticsResponder handles the response to the GetStatistics request. +func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceStats{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified +// account +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. 
marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's +// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListContainersSegmentResponse), err +} + +// listContainersSegmentPreparer prepares the ListContainersSegment request. 
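ListContainersSegment returns one page per call; the caller feeds NextMarker back in until it comes back empty. A standalone sketch of that marker protocol ahead of the preparer below; listPage is a hypothetical stand-in for the real call:

package main

import "fmt"

// listPage is a hypothetical stand-in for one ListContainersSegment round trip:
// it returns a page of container names plus the NextMarker for the next request.
func listPage(marker string) (names []string, nextMarker string) {
	pages := map[string][]string{"": {"logs", "media"}, "m1": {"backups"}}
	next := map[string]string{"": "m1", "m1": ""}
	return pages[marker], next[marker]
}

func main() {
	// Start with an empty marker; feed NextMarker back in until it is empty.
	for marker := ""; ; {
		names, next := listPage(marker)
		fmt.Println(names)
		if next == "" {
			break
		}
		marker = next
	}
}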
+func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != ListContainersIncludeNone { + params.Set("include", string(include)) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listContainersSegmentResponder handles the response to the ListContainersSegment request. +func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListContainersSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules +// +// storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: storageServiceProperties, + constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.HourMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceSetPropertiesResponse), err +} + +// setPropertiesPreparer prepares the SetProperties request. 
+func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(storageServiceProperties) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setPropertiesResponder handles the response to the SetProperties request. +func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go new file mode 100644 index 000000000..98a2614e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go @@ -0,0 +1,367 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type constraint struct { + // Target field name for validation. + target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + rule interface{} + + // Chain validations for struct type + chain []constraint +} + +// Validation stores parameter-wise validation. +type validation struct { + targetValue interface{} + constraints []constraint +} + +// Constraint list +const ( + empty = "Empty" + null = "Null" + readOnly = "ReadOnly" + pattern = "Pattern" + maxLength = "MaxLength" + minLength = "MinLength" + maxItems = "MaxItems" + minItems = "MinItems" + multipleOf = "MultipleOf" + uniqueItems = "UniqueItems" + inclusiveMaximum = "InclusiveMaximum" + exclusiveMaximum = "ExclusiveMaximum" + exclusiveMinimum = "ExclusiveMinimum" + inclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. 
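Every operation in these files funnels its pointer parameters through this machinery in the same shape: a null constraint with rule false (the parameter is optional) whose chain applies inclusiveMinimum once the pointer is set. A standalone mirror of those semantics before the validate function below (a local helper; the vendored types are unexported):

package main

import "fmt"

// checkOptionalMin mirrors the chain used for the timeout parameter throughout
// this package: nil is acceptable (null, rule: false), but a present value
// must satisfy inclusiveMinimum(min).
func checkOptionalMin(name string, v *int32, min int32) error {
	if v == nil {
		return nil
	}
	if *v < min {
		return fmt.Errorf("validation failed: parameter=%s constraint=InclusiveMinimum value=%d", name, *v)
	}
	return nil
}

func main() {
	bad := int32(-1)
	fmt.Println(checkOptionalMin("timeout", nil, 0))  // <nil>
	fmt.Println(checkOptionalMin("timeout", &bad, 0)) // validation failed: ...
}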
+func validate(m []validation) error { + for _, item := range m { + v := reflect.ValueOf(item.targetValue) + for _, constraint := range item.constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) + } + err := validate([]validation{ + { + targetValue: getInterfaceValue(f), + constraints: []constraint{v}, + }, + }) + return err +} + +func validatePtr(x reflect.Value, v constraint) error { + if v.name == readOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x.Elem()), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v constraint) error { + i := x.Int() + r, ok := v.rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case multipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case exclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) + } + return nil +} + +func validateFloat(x reflect.Value, v constraint) error { + f := x.Float() + r, ok := v.rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case exclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) + } + return nil +} + +func 
validateString(x reflect.Value, v constraint) error { + s := x.String() + switch v.name { + case empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) + } + case maxLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) > v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) + } + case minLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) < v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) + } + case readOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v constraint) error { + switch v.name { + case null: + if x.IsNil() { + return checkNil(x, v) + } + case empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case maxItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() > v.rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) + } + case minItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() < v.rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) + } + case uniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) + } + case readOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must 
be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v constraint, message string) error { + return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.target, v.name, getInterfaceValue(x), message)) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go new file mode 100644 index 000000000..760271a42 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go @@ -0,0 +1,14 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/0.0.0 azblob/2018-03-28" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "0.0.0" +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go new file mode 100644 index 000000000..8c7f59453 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go @@ -0,0 +1,242 @@ +package azblob + +import ( + "context" + "io" + "net/http" + "time" +) + +// BlobHTTPHeaders contains read/writeable blob properties. +type BlobHTTPHeaders struct { + ContentType string + ContentMD5 []byte + ContentEncoding string + ContentLanguage string + ContentDisposition string + CacheControl string +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. 
+func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + ContentType: bgpr.ContentType(), + ContentEncoding: bgpr.ContentEncoding(), + ContentLanguage: bgpr.ContentLanguage(), + ContentDisposition: bgpr.ContentDisposition(), + CacheControl: bgpr.CacheControl(), + ContentMD5: bgpr.ContentMD5(), + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// NewHTTPHeaders returns the user-modifiable properties for this blob. +func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + ContentType: dr.ContentType(), + ContentEncoding: dr.ContentEncoding(), + ContentLanguage: dr.ContentLanguage(), + ContentDisposition: dr.ContentDisposition(), + CacheControl: dr.CacheControl(), + ContentMD5: dr.ContentMD5(), + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry. +type DownloadResponse struct { + r *downloadResponse + ctx context.Context + b BlobURL + getInfo HTTPGetterInfo +} + +// Body constructs a new RetryReader stream for reading data. If a connection fails +// while reading, it will make additional requests to reestablish a connection and +// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0 +// (the default) returns the original response body and no retries will be performed. +func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { + if o.MaxRetryRequests == 0 { // No additional retries + return r.Response().Body + } + return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, + func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { + resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, + BlobAccessConditions{ + ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, + }, + false) + if err != nil { + return nil, err + } + return resp.Response(), err + }, + ) +} + +// Response returns the raw HTTP response object. +func (r DownloadResponse) Response() *http.Response { + return r.r.Response() +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. +func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders { + return r.r.NewHTTPHeaders() +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (r DownloadResponse) BlobContentMD5() []byte { + return r.r.BlobContentMD5() +} + +// ContentMD5 returns the value for header Content-MD5. +func (r DownloadResponse) ContentMD5() []byte { + return r.r.ContentMD5() +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (r DownloadResponse) StatusCode() int { + return r.r.StatusCode() +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (r DownloadResponse) Status() string { + return r.r.Status() +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (r DownloadResponse) AcceptRanges() string { + return r.r.AcceptRanges() +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (r DownloadResponse) BlobCommittedBlockCount() int32 { + return r.r.BlobCommittedBlockCount() +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (r DownloadResponse) BlobSequenceNumber() int64 { + return r.r.BlobSequenceNumber() +} +
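A hedged usage sketch of the retry behavior described above, assuming the public wrappers exported elsewhere in this package (NewBlobURL, NewPipeline, NewAnonymousCredential, CountToEnd) keep their released signatures:

package main

import (
	"context"
	"io/ioutil"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/container/blob") // placeholder URL
	blobURL := azblob.NewBlobURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))
	dl, err := blobURL.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		log.Fatal(err)
	}
	// MaxRetryRequests > 0 wraps the body in the retrying reader; 0 (the
	// default) hands back the raw response body unchanged.
	body := dl.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(data))
}

+// BlobType returns the value for header x-ms-blob-type.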
+func (r DownloadResponse) BlobType() BlobType { + return r.r.BlobType() +} + +// CacheControl returns the value for header Cache-Control. +func (r DownloadResponse) CacheControl() string { + return r.r.CacheControl() +} + +// ContentDisposition returns the value for header Content-Disposition. +func (r DownloadResponse) ContentDisposition() string { + return r.r.ContentDisposition() +} + +// ContentEncoding returns the value for header Content-Encoding. +func (r DownloadResponse) ContentEncoding() string { + return r.r.ContentEncoding() +} + +// ContentLanguage returns the value for header Content-Language. +func (r DownloadResponse) ContentLanguage() string { + return r.r.ContentLanguage() +} + +// ContentLength returns the value for header Content-Length. +func (r DownloadResponse) ContentLength() int64 { + return r.r.ContentLength() +} + +// ContentRange returns the value for header Content-Range. +func (r DownloadResponse) ContentRange() string { + return r.r.ContentRange() +} + +// ContentType returns the value for header Content-Type. +func (r DownloadResponse) ContentType() string { + return r.r.ContentType() +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (r DownloadResponse) CopyCompletionTime() time.Time { + return r.r.CopyCompletionTime() +} + +// CopyID returns the value for header x-ms-copy-id. +func (r DownloadResponse) CopyID() string { + return r.r.CopyID() +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (r DownloadResponse) CopyProgress() string { + return r.r.CopyProgress() +} + +// CopySource returns the value for header x-ms-copy-source. +func (r DownloadResponse) CopySource() string { + return r.r.CopySource() +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (r DownloadResponse) CopyStatus() CopyStatusType { + return r.r.CopyStatus() +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (r DownloadResponse) CopyStatusDescription() string { + return r.r.CopyStatusDescription() +} + +// Date returns the value for header Date. +func (r DownloadResponse) Date() time.Time { + return r.r.Date() +} + +// ETag returns the value for header ETag. +func (r DownloadResponse) ETag() ETag { + return r.r.ETag() +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (r DownloadResponse) IsServerEncrypted() string { + return r.r.IsServerEncrypted() +} + +// LastModified returns the value for header Last-Modified. +func (r DownloadResponse) LastModified() time.Time { + return r.r.LastModified() +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (r DownloadResponse) LeaseDuration() LeaseDurationType { + return r.r.LeaseDuration() +} + +// LeaseState returns the value for header x-ms-lease-state. +func (r DownloadResponse) LeaseState() LeaseStateType { + return r.r.LeaseState() +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (r DownloadResponse) LeaseStatus() LeaseStatusType { + return r.r.LeaseStatus() +} + +// RequestID returns the value for header x-ms-request-id. +func (r DownloadResponse) RequestID() string { + return r.r.RequestID() +} + +// Version returns the value for header x-ms-version. +func (r DownloadResponse) Version() string { + return r.r.Version() +} + +// NewMetadata returns user-defined key/value pairs. 
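Because a set-headers call replaces every user-modifiable header at once, the usual pattern is to start from NewHTTPHeaders() and change only the field of interest. A sketch under the assumption that the public BlobURL wrappers (GetProperties, SetHTTPHeaders) sit over the generated client shown in this diff:

package main

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// setContentType changes one header while preserving the others by starting
// from the blob's current values via NewHTTPHeaders.
func setContentType(ctx context.Context, b azblob.BlobURL, ct string) error {
	props, err := b.GetProperties(ctx, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	h := props.NewHTTPHeaders() // snapshot of the current user-modifiable headers
	h.ContentType = ct
	_, err = b.SetHTTPHeaders(ctx, h, azblob.BlobAccessConditions{})
	return err
}

func main() {}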
+func (r DownloadResponse) NewMetadata() Metadata { + return r.r.NewMetadata() +} diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index bee5e61dd..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,81 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/url" -) - -const ( - activeDirectoryAPIVersion = "1.0" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. 
-func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '" + name + "' cannot be empty") - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant specific urls -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - // it's legal for tenantID to be empty so don't validate it - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index b38f4c245..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,242 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
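Reviewer note: with go-autorest leaving the vendor tree, it's worth recording what the deleted config.go provided: NewOAuthConfig derives the tenant's authorize, token, and device-code endpoints from an authority URL. A small sketch of its use; the tenant name is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// "contoso.onmicrosoft.com" is a placeholder tenant; any tenant ID (or GUID) works.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "contoso.onmicrosoft.com")
	if err != nil {
		log.Fatal(err)
	}
	// All three endpoints are derived from the authority URL plus the tenant.
	fmt.Println(cfg.AuthorizeEndpoint.String())
	fmt.Println(cfg.TokenEndpoint.String())
	fmt.Println(cfg.DeviceCodeEndpoint.String())
}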
- -/* - This file is largely based on rjw57/oauth2device's code, with the following differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object returned by the token exchange endpoint -//
It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - 
return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f2751..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. 
-func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index 0e5ad14d3..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,60 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/http" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to -// the Sender. Decorators are applied in the order received, but their effect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response).
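Reviewer note: taken together, the deleted devicetoken.go and persist.go supported a complete interactive login: run the device flow, then store the resulting token using the atomic temp-file-plus-rename write shown above. A sketch under placeholder tenant, client, and resource values:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant, client ID, and resource; real values come from an AAD app registration.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "contoso.onmicrosoft.com")
	if err != nil {
		log.Fatal(err)
	}
	code, err := adal.InitiateDeviceAuth(&http.Client{}, *cfg, "my-client-id", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*code.Message) // tells the user where to enter the user code

	// Poll until the user approves; pending and slow_down states are handled internally.
	token, err := adal.WaitForUserCompletion(&http.Client{}, code)
	if err != nil {
		log.Fatal(err)
	}

	// Persist the token with the atomic write shown above, then read it back.
	if err := adal.SaveToken("/tmp/azure-token.json", 0600, *token); err != nil {
		log.Fatal(err)
	}
	loaded, err := adal.LoadToken("/tmp/azure-token.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.AccessToken == token.AccessToken) // true
}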
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index 32aea8389..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,968 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/version" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well known endpoint for getting MSI authentications tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. 
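Reviewer note: the Sender machinery just shown composes like ordinary HTTP middleware; DecorateSender threads a Sender through each SendDecorator in order. A minimal sketch with a hypothetical logging decorator:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// logRequests is a hypothetical SendDecorator: it wraps a Sender and logs
// each request before delegating to the wrapped implementation.
func logRequests(s adal.Sender) adal.Sender {
	return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
		log.Printf("sending %s %s", r.Method, r.URL)
		return s.Do(r)
	})
}

func main() {
	sender := adal.CreateSender(logRequests) // default http.Client wrapped by the decorator
	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := sender.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}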
-type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(float64(s)) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. -func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. 
-func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
-func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - secret := raw["secret"].(map[string]interface{}) - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - spt.refreshLock = &sync.RWMutex{} - spt.sender = &http.Client{} - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. -func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) 
- if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. -func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
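Reviewer note: of the constructors above, the client-secret form was the common entry point. A sketch of the acquire-and-use cycle with placeholder credentials; EnsureFresh performs the initial request because the zero-value token is already expired:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Tenant, client ID, and secret are placeholders for a real AAD app registration.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "contoso.onmicrosoft.com")
	if err != nil {
		log.Fatal(err)
	}
	spt, err := adal.NewServicePrincipalToken(*cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// The zero-value token is already "expired", so this performs the first acquisition.
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(spt.OAuthToken())
}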
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the specified user assigned identity when creating the token. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != nil { - if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - } - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("resource", resource) - v.Set("api-version", "2018-02-01") - if userAssignedID != nil { - v.Set("client_id", *userAssignedID) - } - msiEndpointURL.RawQuery = v.Encode() - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{}, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - if userAssignedID != nil { - spt.inner.ClientID = *userAssignedID - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be synchronized.
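Reviewer note: for managed identities the flow collapsed to two calls, since IMDS needs no secret material. A sketch that only succeeds on an Azure VM where the IMDS endpoint is reachable; the ARM resource URI is an example:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Resolves to the well-known IMDS token endpoint.
	msiEndpoint, err := adal.GetMSIVMEndpoint()
	if err != nil {
		log.Fatal(err)
	}
	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	if err := spt.Refresh(); err != nil {
		log.Fatal(err) // transient failures are retried internally via retryForIMDS
	}
	fmt.Println(spt.Token().AccessToken)
}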
-func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) - if err != nil { - return false - } - return u.Host == imds.Host && u.Path == imds.Path -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) - } - req.Header.Add("User-Agent", version.UserAgent()) - req = req.WithContext(ctx) - if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - } - - if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - req.Method = http.MethodGet - req.Header.Set(metadataHeader, "true") - } - - var resp *http.Response - if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - resp, err = spt.sender.Do(req) - } - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request.
Error = '%v'", err), nil) - } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.inner.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - for attempt < maxAttempts { - resp, err = sender.Do(req) - // retry on temporary network errors, e.g. transient network failures. - // if we don't receive a response then assume we can't connect to the - // endpoint so we're likely not running on an Azure VM so don't retry. - if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -// returns true if the specified error is a temporary network error or false if it's not. -// if the error doesn't implement the net.Error interface the return value is true. 
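Reviewer note: the retry loop above grows the cumulative delay by 2^attempt seconds per attempt and caps it at 60 seconds. A standalone sketch of the schedule that arithmetic produces:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Mirrors the delay computation in the deleted retryForIMDS.
	const maxDelay = 60 * time.Second
	delay := time.Duration(0)
	for attempt := 1; attempt <= 5; attempt++ {
		delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
		if delay > maxDelay {
			delay = maxDelay
		}
		fmt.Printf("attempt %d: wait %v\n", attempt, delay) // 2s, 6s, 14s, 30s, 60s
	}
}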
-func isTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// returns true if slice ints contains the value n -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 77eff45bd..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,259 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. 
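Reviewer note: token.go also exposed a few knobs on ServicePrincipalToken for tuning refresh behavior, shown above. A sketch with arbitrary example values, not recommendations:

package main

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

// configureRefresh demonstrates the refresh-tuning setters.
func configureRefresh(spt *adal.ServicePrincipalToken) {
	spt.SetAutoRefresh(true)                               // refresh stale tokens automatically
	spt.SetRefreshWithin(10 * time.Minute)                 // refresh once expiry is this close
	spt.SetSender(&http.Client{Timeout: 30 * time.Second}) // custom HTTP client for token requests
}

func main() {} // placeholder entry point so the sketch compiles standalone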
-func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
-	return NewAPIKeyAuthorizer(headers, nil)
-}
-
-// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
-func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
-	return NewAPIKeyAuthorizer(nil, queryParameters)
-}
-
-// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers and query parameters.
-func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
-	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
-func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
-	}
-}
-
-// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
-type CognitiveServicesAuthorizer struct {
-	subscriptionKey string
-}
-
-// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
-func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
-	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription-key
-// and Bing SDK headers.
-func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
-	headers := make(map[string]interface{})
-	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
-	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
-
-	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
-}
-
-// BearerAuthorizer implements bearer authorization.
-type BearerAuthorizer struct {
-	tokenProvider adal.OAuthTokenProvider
-}
-
-// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
-func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
-	return &BearerAuthorizer{tokenProvider: tp}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
-// value is "Bearer " followed by the token.
-//
-// By default, the token will be automatically refreshed through the Refresher interface.
-func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err == nil {
-				// the ordering is important here, prefer RefresherWithContext if available
-				if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
-					err = refresher.EnsureFreshWithContext(r.Context())
-				} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
-					err = refresher.EnsureFresh()
-				}
-				if err != nil {
-					var resp *http.Response
-					if tokError, ok := err.(adal.TokenRefreshError); ok {
-						resp = tokError.Response()
-					}
-					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
-						"Failed to refresh the Token for request to %s", r.URL)
-				}
-				return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
-			}
-			return r, err
-		})
-	}
-}
-
-// BearerAuthorizerCallbackFunc is the authentication callback signature.
-type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
-
-// BearerAuthorizerCallback implements bearer authorization via a callback.
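As a caller-side sketch (illustrative only; obtaining the service principal token is assumed to happen elsewhere and is not part of this diff), a BearerAuthorizer plugs into Prepare like any other PrepareDecorator:

	package sketch

	import (
		"net/http"

		"github.com/Azure/go-autorest/autorest"
		"github.com/Azure/go-autorest/autorest/adal"
	)

	// newAuthorizedRequest attaches "Authorization: Bearer <token>" to a request,
	// refreshing the token first if it is stale.
	func newAuthorizedRequest(spt *adal.ServicePrincipalToken) (*http.Request, error) {
		return autorest.Prepare(&http.Request{},
			autorest.WithBaseURL("https://management.azure.com/"),
			autorest.NewBearerAuthorizer(spt).WithAuthorization())
	}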
-type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{} - } - return &BearerAuthorizerCallback{sender: sender, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. - rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(resp *http.Response) bool { - authHeader := resp.Header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. 
-func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index aafdf021f..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. -func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 9dd7a1d27..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,958 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - req *http.Request // legacy - pt pollingTracker -} - -// NewFuture returns a new Future object initialized with the specified request. -// Deprecated: Please use NewFutureFromResponse instead. -func NewFuture(req *http.Request) Future { - return Future{req: req} -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - if err != nil { - return Future{}, err - } - return Future{pt: pt}, nil -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. -func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// Done queries the service to see if the operation has completed. -func (f *Future) Done(sender autorest.Sender) (bool, error) { - // support for legacy Future implementation - if f.req != nil { - resp, err := sender.Do(f.req) - if err != nil { - return false, err - } - pt, err := createPollingTracker(resp) - if err != nil { - return false, err - } - f.pt = pt - f.req = nil - } - // end legacy - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. 
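For context, a minimal caller-side sketch of this Future API (the context, configured autorest.Client, and initial LRO response are assumed to be supplied by the caller; WaitForCompletionRef and GetResult appear further down in this file):

	package sketch

	import (
		"context"
		"net/http"

		"github.com/Azure/go-autorest/autorest"
		"github.com/Azure/go-autorest/autorest/azure"
	)

	// waitForLRO blocks until the long-running operation behind resp reaches a
	// terminal state, then performs the final GET for the result payload.
	func waitForLRO(ctx context.Context, client autorest.Client, resp *http.Response) (*http.Response, error) {
		future, err := azure.NewFutureFromResponse(resp)
		if err != nil {
			return nil, err
		}
		if err := future.WaitForCompletionRef(ctx, client); err != nil {
			return nil, err
		}
		return future.GetResult(client)
	}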
-func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletion will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// Deprecated: Please use WaitForCompletionRef() instead. -func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - return f.WaitForCompletionRef(ctx, client) -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { - ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) - defer cancel() - done, err := f.Done(client) - for attempts := 0; !done; done, err = f.Done(client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return err -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (f *Future) UnmarshalJSON(data []byte) error {
-	// unmarshal into JSON object to determine the tracker type
-	obj := map[string]interface{}{}
-	err := json.Unmarshal(data, &obj)
-	if err != nil {
-		return err
-	}
-	if obj["method"] == nil {
-		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
-	}
-	method := obj["method"].(string)
-	switch strings.ToUpper(method) {
-	case http.MethodDelete:
-		f.pt = &pollingTrackerDelete{}
-	case http.MethodPatch:
-		f.pt = &pollingTrackerPatch{}
-	case http.MethodPost:
-		f.pt = &pollingTrackerPost{}
-	case http.MethodPut:
-		f.pt = &pollingTrackerPut{}
-	default:
-		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
-	}
-	// now unmarshal into the tracker
-	return json.Unmarshal(data, &f.pt)
-}
-
-// PollingURL returns the URL used for retrieving the status of the long-running operation.
-func (f Future) PollingURL() string {
-	if f.pt == nil {
-		return ""
-	}
-	return f.pt.pollingURL()
-}
-
-// GetResult should be called once polling has completed successfully.
-// It makes the final GET call to retrieve the resultant payload.
-func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
-	if f.pt.finalGetURL() == "" {
-		// we can end up in this situation if the async operation returns a 200
-		// with no polling URLs. in that case return the response which should
-		// contain the JSON payload (only do this for successful terminal cases).
-		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
-			return lr, nil
-		}
-		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
-	}
-	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
-	if err != nil {
-		return nil, err
-	}
-	return sender.Do(req)
-}
-
-type pollingTracker interface {
-	// these methods can differ per tracker
-
-	// checks the response headers and status code to determine the polling mechanism
-	updatePollingMethod() error
-
-	// checks the response for tracker-specific error conditions
-	checkForErrors() error
-
-	// returns true if provisioning state should be checked
-	provisioningStateApplicable() bool
-
-	// methods common to all trackers
-
-	// initializes a tracker's polling URL and method, called for each iteration.
-	// these values can be overridden by each polling tracker as required.
- initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - // attach the context from the original request if available (it will be absent for deserialized futures) - if pt.resp != nil { - req = req.WithContext(pt.resp.Request.Context()) - } - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. -func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. 
- if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. 
-func (pt *pollingTrackerBase) initPollingMethod() error {
-	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
-		return err
-	} else if ao != "" {
-		pt.URI = ao
-		pt.Pm = PollingAsyncOperation
-		return nil
-	}
-	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-		return err
-	} else if lh != "" {
-		pt.URI = lh
-		pt.Pm = PollingLocation
-		return nil
-	}
-	// it's ok if we didn't find a polling header, this will be handled elsewhere
-	return nil
-}
-
-// DELETE
-
-type pollingTrackerDelete struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerDelete) updatePollingMethod() error {
-	// for 201 the Location header is required
-	if pt.resp.StatusCode == http.StatusCreated {
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-			return err
-		} else if lh == "" {
-			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
-		} else {
-			pt.URI = lh
-		}
-		pt.Pm = PollingLocation
-		pt.FinalGetURI = pt.URI
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		// if the Location header is invalid and we already have a polling URL
-		// then we don't care if the Location header URL is malformed.
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
-			return err
-		} else if lh != "" {
-			if ao == "" {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-			// when both headers are returned we use the value in the Location header for the final GET
-			pt.FinalGetURI = lh
-		}
-		// make sure a polling URL was found
-		if pt.URI == "" {
-			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerDelete) checkForErrors() error {
-	return pt.baseCheckForErrors()
-}
-
-func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
-}
-
-// PATCH
-
-type pollingTrackerPatch struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerPatch) updatePollingMethod() error {
-	// by default we can use the original URL for polling and final GET
-	if pt.URI == "" {
-		pt.URI = pt.resp.Request.URL.String()
-	}
-	if pt.FinalGetURI == "" {
-		pt.FinalGetURI = pt.resp.Request.URL.String()
-	}
-	if pt.Pm == PollingUnknown {
-		pt.Pm = PollingRequestURI
-	}
-	// for 201 it's permissible for no headers to be returned
-	if pt.resp.StatusCode == http.StatusCreated {
-		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	// note the absence of the "final GET" mechanism for PATCH
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		if ao == "" {
-			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-				return err
-			} else if lh == "" {
-				return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-			} else {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-		}
-	}
-	return nil
-}
-
-func (pt
pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
-			return err
-		} else if lh != "" {
-			if ao == "" {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-			// when both headers are returned we use the value in the Location header for the final GET
-			pt.FinalGetURI = lh
-		}
-		// make sure a polling URL was found
-		if pt.URI == "" {
-			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerPut) checkForErrors() error {
-	err := pt.baseCheckForErrors()
-	if err != nil {
-		return err
-	}
-	// if there are no LRO headers then the body cannot be empty
-	ao, err := getURLFromAsyncOpHeader(pt.resp)
-	if err != nil {
-		return err
-	}
-	lh, err := getURLFromLocationHeader(pt.resp)
-	if err != nil {
-		return err
-	}
-	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
-		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
-	}
-	return nil
-}
-
-func (pt pollingTrackerPut) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
-}
-
-// creates a polling tracker based on the verb of the original request
-func createPollingTracker(resp *http.Response) (pollingTracker, error) {
-	var pt pollingTracker
-	switch strings.ToUpper(resp.Request.Method) {
-	case http.MethodDelete:
-		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPatch:
-		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPost:
-		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPut:
-		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	default:
-		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
-	}
-	if err := pt.initializeState(); err != nil {
-		return pt, err
-	}
-	// this initializes the polling header values, we do this during creation in case the
-	// initial response sends us invalid values; this way the API call will return a non-nil
-	// error (not doing this means the error shows up in Future.Done)
-	return pt, pt.updatePollingMethod()
-}
-
-// gets the polling URL from the Azure-AsyncOperation header.
-// ensures the URL is well-formed and absolute.
-func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
-	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
-	if s == "" {
-		return "", nil
-	}
-	if !isValidURL(s) {
-		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
-	}
-	return s, nil
-}
-
-// gets the polling URL from the Location header.
-// ensures the URL is well-formed and absolute.
-func getURLFromLocationHeader(resp *http.Response) (string, error) {
-	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
-	if s == "" {
-		return "", nil
-	}
-	if !isValidURL(s) {
-		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
-	}
-	return s, nil
-}
-
-// verify that the URL is valid and absolute
-func isValidURL(s string) bool {
-	u, err := url.Parse(s)
-	return err == nil && u.IsAbs()
-}
-
-// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
-// long-running operation.
It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via -// the context associated with the http.Request. -// Deprecated: Prefer using Futures to allow for non-blocking async operations. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { - return resp, nil - } - future, err := NewFutureFromResponse(resp) - if err != nil { - return resp, err - } - // retry until either the LRO completes or we receive an error - var done bool - for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { - // check for Retry-After delay, if not present use the specified polling delay - if pd, ok := future.GetPollingDelay(); ok { - delay = pd - } - // wait until the delay elapses or the context is cancelled - if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { - return future.Response(), - autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") - } - } - return future.Response(), err - }) - } -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. - PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3a0a439ff..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package azure provides Azure-specific implementations used with AutoRest. -// See the included examples for more detail. -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"regexp"
-	"strconv"
-	"strings"
-
-	"github.com/Azure/go-autorest/autorest"
-)
-
-const (
-	// HeaderClientID is the Azure extension header to set a user-specified request ID.
-	HeaderClientID = "x-ms-client-request-id"
-
-	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
-	// should be included in the response.
-	HeaderReturnClientID = "x-ms-return-client-request-id"
-
-	// HeaderRequestID is the Azure extension header of the service generated request ID returned
-	// in the response.
-	HeaderRequestID = "x-ms-request-id"
-)
-
-// ServiceError encapsulates the error response from an Azure service.
-// It adheres to the OData v4 specification for error responses.
-type ServiceError struct {
-	Code           string                   `json:"code"`
-	Message        string                   `json:"message"`
-	Target         *string                  `json:"target"`
-	Details        []map[string]interface{} `json:"details"`
-	InnerError     map[string]interface{}   `json:"innererror"`
-	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
-}
-
-func (se ServiceError) Error() string {
-	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
-
-	if se.Target != nil {
-		result += fmt.Sprintf(" Target=%q", *se.Target)
-	}
-
-	if se.Details != nil {
-		if d, err := json.Marshal(se.Details); err != nil {
-			result += fmt.Sprintf(" Details=%v", se.Details)
-		} else {
-			result += fmt.Sprintf(" Details=%v", string(d))
-		}
-	}
-
-	if se.InnerError != nil {
-		if d, err := json.Marshal(se.InnerError); err != nil {
-			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
-		} else {
-			result += fmt.Sprintf(" InnerError=%v", string(d))
-		}
-	}
-
-	if se.AdditionalInfo != nil {
-		if d, err := json.Marshal(se.AdditionalInfo); err != nil {
-			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
-		} else {
-			result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
-		}
-	}
-
-	return result
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
-func (se *ServiceError) UnmarshalJSON(b []byte) error {
-	// per the OData v4 spec the details field must be an array of JSON objects.
-	// unfortunately not all services adhere to the spec and just return a single
-	// object instead of an array with one object. so we have to perform some
-	// shenanigans to accommodate both cases.
-	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
-
-	type serviceError1 struct {
-		Code           string                   `json:"code"`
-		Message        string                   `json:"message"`
-		Target         *string                  `json:"target"`
-		Details        []map[string]interface{} `json:"details"`
-		InnerError     map[string]interface{}   `json:"innererror"`
-		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
-	}
-
-	type serviceError2 struct {
-		Code           string                   `json:"code"`
-		Message        string                   `json:"message"`
-		Target         *string                  `json:"target"`
-		Details        map[string]interface{}   `json:"details"`
-		InnerError     map[string]interface{}   `json:"innererror"`
-		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
-	}
-
-	se1 := serviceError1{}
-	err := json.Unmarshal(b, &se1)
-	if err == nil {
-		se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
-		return nil
-	}
-
-	se2 := serviceError2{}
-	err = json.Unmarshal(b, &se2)
-	if err == nil {
-		se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
-		se.Details = append(se.Details, se2.Details)
-		return nil
-	}
-	return err
-}
-
-func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
-	se.Code = code
-	se.Message = message
-	se.Target = target
-	se.Details = details
-	se.InnerError = inner
-	se.AdditionalInfo = additional
-}
-
-// RequestError describes an error response returned by an Azure service.
-type RequestError struct {
-	autorest.DetailedError
-
-	// The error returned by the Azure service.
-	ServiceError *ServiceError `json:"error"`
-
-	// The request id (from the x-ms-request-id header) of the request.
-	RequestID string
-}
-
-// Error returns a human-friendly error message from the service error.
-func (e RequestError) Error() string {
-	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
-		e.StatusCode, e.ServiceError)
-}
-
-// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
-func IsAzureError(e error) bool {
-	_, ok := e.(*RequestError)
-	return ok
-}
-
-// Resource contains details about an Azure resource.
-type Resource struct {
-	SubscriptionID string
-	ResourceGroup  string
-	Provider       string
-	ResourceType   string
-	ResourceName   string
-}
-
-// ParseResourceID parses a resource ID into a Resource struct.
-// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
-func ParseResourceID(resourceID string) (Resource, error) {
-
-	const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
-	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
-	match := resourceIDPattern.FindStringSubmatch(resourceID)
-
-	if len(match) == 0 {
-		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
-	}
-
-	v := strings.Split(match[5], "/")
-	resourceName := v[len(v)-1]
-
-	result := Resource{
-		SubscriptionID: match[1],
-		ResourceGroup:  match[2],
-		Provider:       match[3],
-		ResourceType:   match[4],
-		ResourceName:   resourceName,
-	}
-
-	return result, nil
-}
-
-// NewErrorWithError creates a new Error conforming object from the
-// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
-// if resp is nil), message, and original error. message is treated as a format
-// string to which the optional args apply.
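A worked example of ParseResourceID above, using a hypothetical resource ID:

	package main

	import (
		"fmt"

		"github.com/Azure/go-autorest/autorest/azure"
	)

	func main() {
		r, err := azure.ParseResourceID("/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1")
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
		// Output: sub1 rg1 Microsoft.Compute virtualMachines vm1
	}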
-func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
-	if v, ok := original.(*RequestError); ok {
-		return *v
-	}
-
-	statusCode := autorest.UndefinedStatusCode
-	if resp != nil {
-		statusCode = resp.StatusCode
-	}
-	return RequestError{
-		DetailedError: autorest.DetailedError{
-			Original:    original,
-			PackageType: packageType,
-			Method:      method,
-			StatusCode:  statusCode,
-			Message:     fmt.Sprintf(message, args...),
-		},
-	}
-}
-
-// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
-// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
-// header to true such that UUID accompanies the http.Response.
-func WithReturningClientID(uuid string) autorest.PrepareDecorator {
-	preparer := autorest.CreatePreparer(
-		WithClientID(uuid),
-		WithReturnClientID(true))
-
-	return func(p autorest.Preparer) autorest.Preparer {
-		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err != nil {
-				return r, err
-			}
-			return preparer.Prepare(r)
-		})
-	}
-}
-
-// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
-// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
-func WithClientID(uuid string) autorest.PrepareDecorator {
-	return autorest.WithHeader(HeaderClientID, uuid)
-}
-
-// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-return-client-request-id whose boolean value indicates if the value of the
-// x-ms-client-request-id header should be included in the http.Response.
-func WithReturnClientID(b bool) autorest.PrepareDecorator {
-	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
-}
-
-// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
-// http.Request sent to the service (and returned in the http.Response)
-func ExtractClientID(resp *http.Response) string {
-	return autorest.ExtractHeaderValue(HeaderClientID, resp)
-}
-
-// ExtractRequestID extracts the Azure server generated request identifier from the
-// x-ms-request-id header.
-func ExtractRequestID(resp *http.Response) string {
-	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
-}
-
-// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
-// azure.RequestError by reading the response body unless the response HTTP status code
-// is among the set passed.
-//
-// If there is a chance the service may return responses other than the Azure error
-// format and the response cannot be parsed into an error, a decoding error will
-// be returned containing the response body. In any case, the Responder will
-// return an error if the status code is not satisfied.
-//
-// If this Responder returns an error, the response body will be replaced with
-// an in-memory reader, which needs no further closing.
-func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
-	return func(r autorest.Responder) autorest.Responder {
-		return autorest.ResponderFunc(func(resp *http.Response) error {
-			err := r.Respond(resp)
-			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
-				var e RequestError
-				defer resp.Body.Close()
-
-				// Copy and replace the Body in case it does not contain an error object.
- // This will leave the Body available to the caller. - b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { - return err - } - } - if e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body as raw JSON in hopes of getting something. - rawBody := map[string]interface{}{} - if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { - return err - } - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 7e41f7fd9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,191 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -// EnvironmentFilepathName captures the name of the environment variable containing the path to the file -// to be used while populating the Azure Environment. -const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
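A sketch of how the WithErrorUnlessStatusCode responder earlier in this file is typically composed (the accepted status codes here are illustrative):

	package sketch

	import (
		"net/http"

		"github.com/Azure/go-autorest/autorest"
		"github.com/Azure/go-autorest/autorest/azure"
	)

	// respond turns any response outside the accepted set into an *azure.RequestError
	// and closes the response body.
	func respond(resp *http.Response) error {
		return autorest.Respond(resp,
			azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
			autorest.ByClosing())
	}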
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - TokenAudience string `json:"tokenAudience"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.azure.com/", - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - 
ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.usgovcloudapi.net/", - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.chinacloudapi.cn/", - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.microsoftazure.de/", - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. 
-// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e95c..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... 
- EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. -func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case 
EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index bd34f0ed5..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
-// It also handles request retries. -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, these sections are easier to maintain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - now := time.Now() - for err == nil && 
time.Since(now) < client.PollingDuration { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if !(time.Since(now) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index 5c558c83a..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,267 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/cookiejar" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/version" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is the number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries.
- DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status codes for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write request: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -}
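
Since this change drops the logging inspectors along with the rest of go-autorest, here is a minimal sketch of how they were typically wired up, for reference (the logger destination and user-agent string are illustrative; the Client fields used appear on the type defined just below):

package main

import (
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Route full request/response dumps to stderr.
	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "autorest: ", log.LstdFlags)}
	c := autorest.NewClientWithUserAgent("example/1.0")
	c.RequestInspector = li.WithInspection() // dumps each outgoing request; body restored afterwards
	c.ResponseInspector = li.ByInspecting()  // dumps each response; body restored afterwards
}
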
-// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: version.UserAgent(), - } - c.Sender = c.sender() - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent. -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(), r) - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender() Sender { - if c.Sender == nil { - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -}
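
The Client being deleted here was typically tuned through its exported fields before use; a sketch under assumed values (the endpoint is a placeholder, and the request will simply fail against it):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	c := autorest.NewClientWithUserAgent("example/1.0")
	c.RetryAttempts = 5                // overrides DefaultRetryAttempts above
	c.RetryDuration = 10 * time.Second // overrides DefaultRetryDuration above
	c.PollingDuration = 5 * time.Minute

	req, _ := http.NewRequest("GET", "https://example.invalid/resource", nil)
	resp, err := c.Do(req) // applies UserAgent, authorization, and inspectors
	fmt.Println(resp, err)
}
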
-// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c45710656..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate creates a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02).
-func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad04..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). 
-func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). 
-func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring leap seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retrieves the moment considered the Unix Epoch, i.e., the time represented by '0'. -func UnixEpoch() time.Time { - return unixEpoch -}
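
For reference, the removed date types round-trip like this (values are illustrative; the UnixTime JSON form follows from the comments around its marshalers below):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	d, _ := date.ParseDate("2019-01-02") // date.Date accepts the full-date layout only

	var t date.Time // RFC3339 date-time; a missing 'Z' suffix is tolerated
	_ = json.Unmarshal([]byte(`"2019-01-02T15:04:05"`), &t)

	ut := date.NewUnixTimeFromSeconds(1546440245)
	b, _ := json.Marshal(ut) // encodes as a JSON number of seconds since the epoch

	fmt.Println(d, t, string(b))
}
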
-// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements -// (i.e. the number of seconds since midnight January 1st, 1970, not considering leap seconds). -func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0eb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime parses a time string using the specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -}
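
The ToUpper in ParseTime above exists so that lowercase 't'/'z' separators, which some services emit, still parse; a quick sketch:

package main

import (
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// The lowercase separators are upper-cased before parsing against the layout.
	t, err := date.ParseTime(time.RFC3339, "2001-02-03t04:05:06z")
	fmt.Println(t, err)
}
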
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f3332..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses an error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced by the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // ServiceError is the response body of the failed API call in bytes. - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -}
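
A sketch of the removed error constructors; given a 503 response and no original error, the Error method that follows formats as shown (the package and method names are invented):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{StatusCode: http.StatusServiceUnavailable}
	err := autorest.NewErrorWithResponse("mypkg", "Get", resp, "fetching %s failed", "widget")
	fmt.Println(err) // mypkg#Get: fetching widget failed: StatusCode=503
}
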
-// Error returns a formatted string containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). -func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 6d67bd733..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,480 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must not share or hold per-invocation state, since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a function type that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their effect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -}
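
The decorator machinery above composes by wrapping; a sketch of a hand-written post-decorator in the sense described (the header name is invented for illustration):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// withDateHeader stamps each prepared request. It calls the wrapped Preparer
// first and alters the result, making it a post-decorator in the terms above.
func withDateHeader() autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				if r.Header == nil {
					r.Header = make(http.Header)
				}
				r.Header.Set("X-Example-Date", time.Now().UTC().Format(http.TimeFormat))
			}
			return r, err
		})
	}
}

func main() {
	req, err := autorest.CreatePreparer(withDateHeader()).Prepare(&http.Request{})
	fmt.Println(req.Header, err)
}
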
-// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -}
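
Putting the stock decorators above together; a sketch of preparing a request (the URL and token are placeholders):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.invalid/items", nil)
	req, err := autorest.Prepare(req,
		autorest.AsJSON(),
		autorest.WithUserAgent("example/1.0"),
		autorest.WithBearerAuthorization("<access-token>"))
	fmt.Println(req.Header, err)
}
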
-func WithMethod(method string) PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r.Method = method
-			return p.Prepare(r)
-		})
-	}
-}
-
-// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
-func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
-
-// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
-func AsGet() PrepareDecorator { return WithMethod("GET") }
-
-// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
-func AsHead() PrepareDecorator { return WithMethod("HEAD") }
-
-// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
-func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
-
-// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
-func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
-
-// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
-func AsPost() PrepareDecorator { return WithMethod("POST") }
-
-// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
-func AsPut() PrepareDecorator { return WithMethod("PUT") }
-
-// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
-// from the supplied baseURL.
-func WithBaseURL(baseURL string) PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err == nil {
-				var u *url.URL
-				if u, err = url.Parse(baseURL); err != nil {
-					return r, err
-				}
-				if u.Scheme == "" {
-					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
-				}
-				if err == nil {
-					r.URL = u
-				}
-			}
-			return r, err
-		})
-	}
-}
-
-// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
-// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
-func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
-	parameters := ensureValueStrings(urlParameters)
-	for key, value := range parameters {
-		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
-	}
-	return WithBaseURL(baseURL)
-}
-
-// WithFormData returns a PrepareDecorator that URL-encodes the supplied url.Values
-// (e.g., bar=baz&foo=quux) into the http.Request body.
-func WithFormData(v url.Values) PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err == nil {
-				s := v.Encode()
-
-				if r.Header == nil {
-					r.Header = make(http.Header)
-				}
-				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
-				r.ContentLength = int64(len(s))
-				r.Body = ioutil.NopCloser(strings.NewReader(s))
-			}
-			return r, err
-		})
-	}
-}
-
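For illustration, a minimal caller-side sketch of how the decorators above compose (WithPath is defined later in this file; the base URL and path are placeholders):

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	// Decorators run as a pipeline over an empty request: AsGet sets the
    	// method, WithBaseURL parses and assigns the URL, WithPath appends to it.
    	req, err := autorest.Prepare(&http.Request{},
    		autorest.AsGet(),
    		autorest.WithBaseURL("https://example.invalid"),
    		autorest.WithPath("items"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(req.Method, req.URL) // GET https://example.invalid/items
    }

-// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters
-// into the http.Request body as multipart/form-data.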
-func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. -func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. 
-func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. -func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). 
-func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - - v := r.URL.Query() - for key, value := range parameters { - d, err := url.QueryUnescape(value) - if err != nil { - return r, err - } - v.Add(key, d) - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index a908a0adb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,250 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). 
-func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. 
-func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. 
-func ExtractHeaderValue(header string, resp *http.Response) string {
-	h := ExtractHeader(header, resp)
-	if len(h) > 0 {
-		return h[0]
-	}
-	return ""
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
deleted file mode 100644
index fa11dbed7..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package autorest
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-	"net/http"
-)
-
-// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
-func NewRetriableRequest(req *http.Request) *RetriableRequest {
-	return &RetriableRequest{req: req}
-}
-
-// Request returns the wrapped HTTP request.
-func (rr *RetriableRequest) Request() *http.Request {
-	return rr.req
-}
-
-func (rr *RetriableRequest) prepareFromByteReader() (err error) {
-	// fall back to making a copy (only do this once)
-	b := []byte{}
-	if rr.req.ContentLength > 0 {
-		b = make([]byte, rr.req.ContentLength)
-		_, err = io.ReadFull(rr.req.Body, b)
-		if err != nil {
-			return err
-		}
-	} else {
-		b, err = ioutil.ReadAll(rr.req.Body)
-		if err != nil {
-			return err
-		}
-	}
-	rr.br = bytes.NewReader(b)
-	rr.req.Body = ioutil.NopCloser(rr.br)
-	return err
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
deleted file mode 100644
index 7143cc61b..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// +build !go1.8
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package autorest
-
-import (
-	"bytes"
-	"io/ioutil"
-	"net/http"
-)
-
-// RetriableRequest provides facilities for retrying an HTTP request.
-type RetriableRequest struct {
-	req *http.Request
-	br  *bytes.Reader
-}
-
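A hypothetical helper (the name doWithRetry is mine) sketching the retry loop these types support; it mirrors what DoRetryForAttempts in sender.go does:

    func doWithRetry(client *http.Client, req *http.Request) (*http.Response, error) {
    	// Wrap the request so its body can be rewound and replayed per attempt.
    	rr := autorest.NewRetriableRequest(req)
    	var resp *http.Response
    	var err error
    	for attempt := 0; attempt < 3; attempt++ {
    		if err = rr.Prepare(); err != nil {
    			return nil, err
    		}
    		if resp, err = client.Do(rr.Request()); err == nil {
    			return resp, nil
    		}
    	}
    	return resp, err
    }

-// Prepare signals that the request is about to be sent.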
-func (rr *RetriableRequest) Prepare() (err error) {
-	// preserve the request body; this is to support retry logic as
-	// the underlying transport will always close the request body
-	if rr.req.Body != nil {
-		if rr.br != nil {
-			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
-			rr.req.Body = ioutil.NopCloser(rr.br)
-		}
-		if err != nil {
-			return err
-		}
-		if rr.br == nil {
-			// fall back to making a copy (only do this once)
-			err = rr.prepareFromByteReader()
-		}
-	}
-	return err
-}
-
-func removeRequestBody(req *http.Request) {
-	req.Body = nil
-	req.ContentLength = 0
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
deleted file mode 100644
index ae15c6bf9..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// +build go1.8
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package autorest
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-	"net/http"
-)
-
-// RetriableRequest provides facilities for retrying an HTTP request.
-type RetriableRequest struct {
-	req *http.Request
-	rc  io.ReadCloser
-	br  *bytes.Reader
-}
-
-// Prepare signals that the request is about to be sent.
-func (rr *RetriableRequest) Prepare() (err error) {
-	// preserve the request body; this is to support retry logic as
-	// the underlying transport will always close the request body
-	if rr.req.Body != nil {
-		if rr.rc != nil {
-			rr.req.Body = rr.rc
-		} else if rr.br != nil {
-			_, err = rr.br.Seek(0, io.SeekStart)
-			rr.req.Body = ioutil.NopCloser(rr.br)
-		}
-		if err != nil {
-			return err
-		}
-		if rr.req.GetBody != nil {
-			// this will allow us to preserve the body without having to
-			// make a copy. note we need to do this on each iteration
-			rr.rc, err = rr.req.GetBody()
-			if err != nil {
-				return err
-			}
-		} else if rr.br == nil {
-			// fall back to making a copy (only do this once)
-			err = rr.prepareFromByteReader()
-		}
-	}
-	return err
-}
-
-func removeRequestBody(req *http.Request) {
-	req.Body = nil
-	req.GetBody = nil
-	req.ContentLength = 0
-}
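On Go 1.8 and later, the fast path above leans on http.Request.GetBody, which the standard library fills in for common in-memory body types. A minimal sketch, written as if inside this package (the URL and payload are placeholders):

    // http.NewRequest sets GetBody for *strings.Reader, *bytes.Reader, and
    // *bytes.Buffer bodies, so Prepare can fetch a fresh body for every
    // attempt without buffering its own copy.
    req, _ := http.NewRequest("PUT", "https://example.invalid/upload", strings.NewReader("payload"))
    rr := NewRetriableRequest(req)
    _ = rr.Prepare() // readies the body for the first attempt
    _ = rr.Prepare() // restores a fresh body before a retry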
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
deleted file mode 100644
index e8893a282..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package autorest
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"fmt"
-	"log"
-	"math"
-	"net/http"
-	"strconv"
-	"time"
-)
-
-// Sender is the interface that wraps the Do method to send HTTP requests.
-//
-// The standard http.Client conforms to this interface.
-type Sender interface {
-	Do(*http.Request) (*http.Response, error)
-}
-
-// SenderFunc is a method that implements the Sender interface.
-type SenderFunc func(*http.Request) (*http.Response, error)
-
-// Do implements the Sender interface on SenderFunc.
-func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
-	return sf(r)
-}
-
-// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
-// http.Request and pass it along or, first, pass the http.Request along then react to the
-// http.Response result.
-type SendDecorator func(Sender) Sender
-
-// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
-func CreateSender(decorators ...SendDecorator) Sender {
-	return DecorateSender(&http.Client{}, decorators...)
-}
-
-// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
-// the Sender. Decorators are applied in the order received, but their effect upon the request
-// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
-// post-decorator (pass the http.Request along and react to the results in http.Response).
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
-	for _, decorate := range decorators {
-		s = decorate(s)
-	}
-	return s
-}
-
-// Send sends, by means of the default http.Client, the passed http.Request, returning the
-// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
-// it will apply to the http.Client before invoking the Do method.
-//
-// Send is a convenience method and not recommended for production. Advanced users should use
-// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
-//
-// Send will not poll or retry requests.
-func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
-	return SendWithSender(&http.Client{}, r, decorators...)
-}
-
-// SendWithSender sends the passed http.Request, through the provided Sender, returning the
-// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
-// it will apply to the http.Client before invoking the Do method.
-//
-// SendWithSender will not poll or retry requests.
-func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
-	return DecorateSender(s, decorators...).Do(r)
-}
-
-// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
-// invoking the Sender. The delay may be terminated by closing the optional channel on the
-// http.Request. If canceled, no further Senders are invoked.
-func AfterDelay(d time.Duration) SendDecorator {
-	return func(s Sender) Sender {
-		return SenderFunc(func(r *http.Request) (*http.Response, error) {
-			if !DelayForBackoff(d, 0, r.Context().Done()) {
-				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
-			}
-			return s.Do(r)
-		})
-	}
-}
-
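A small caller-side sketch of decorating a send; req is assumed to be an already-prepared request, and DoRetryForAttempts is defined later in this file:

    resp, err := autorest.SendWithSender(&http.Client{}, req,
    	autorest.DoRetryForAttempts(3, 250*time.Millisecond))
    if err == nil {
    	err = autorest.Respond(resp, autorest.ByDiscardingBody(), autorest.ByClosing())
    }

-// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.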
-func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) { - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. 
-func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return nil, err - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // don't count a 429 against the number of attempts - // so that we continue to retry until it succeeds - if resp == nil || resp.StatusCode != http.StatusTooManyRequests { - attempt++ - } - } - return resp, err - }) - } -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { - select { - case <-time.After(time.Duration(retryAfter) * time.Second): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. 
-func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index bfddd90b5..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,228 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. 
-func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
-	if encodedAs == EncodedAsJSON {
-		return json.NewDecoder(r)
-	} else if encodedAs == EncodedAsXML {
-		return xml.NewDecoder(r)
-	}
-	return nil
-}
-
-// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
-// is especially useful if there is a chance the data will fail to decode.
-// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
-// is the decoding destination.
-func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
-	b := bytes.Buffer{}
-	return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
-}
-
-// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
-// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
-// Further, when it is closed, it ensures that rc is closed as well.
-func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
-	return &teeReadCloser{rc, io.TeeReader(rc, w)}
-}
-
-type teeReadCloser struct {
-	rc io.ReadCloser
-	r  io.Reader
-}
-
-func (t *teeReadCloser) Read(p []byte) (int, error) {
-	return t.r.Read(p)
-}
-
-func (t *teeReadCloser) Close() error {
-	return t.rc.Close()
-}
-
-func containsInt(ints []int, n int) bool {
-	for _, i := range ints {
-		if i == n {
-			return true
-		}
-	}
-	return false
-}
-
-func escapeValueStrings(m map[string]string) map[string]string {
-	for key, value := range m {
-		m[key] = url.QueryEscape(value)
-	}
-	return m
-}
-
-func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
-	mapOfStrings := make(map[string]string)
-	for key, value := range mapOfInterface {
-		mapOfStrings[key] = ensureValueString(value)
-	}
-	return mapOfStrings
-}
-
-func ensureValueString(value interface{}) string {
-	if value == nil {
-		return ""
-	}
-	switch v := value.(type) {
-	case string:
-		return v
-	case []byte:
-		return string(v)
-	default:
-		return fmt.Sprintf("%v", v)
-	}
-}
-
-// MapToValues method converts map[string]interface{} to url.Values.
-func MapToValues(m map[string]interface{}) url.Values {
-	v := url.Values{}
-	for key, value := range m {
-		x := reflect.ValueOf(value)
-		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
-			for i := 0; i < x.Len(); i++ {
-				v.Add(key, ensureValueString(x.Index(i)))
-			}
-		} else {
-			v.Add(key, ensureValueString(value))
-		}
-	}
-	return v
-}
-
-// AsStringSlice method converts interface{} to []string. It expects the parameter passed
-// to be a slice or array whose element type has an underlying type of string.
-func AsStringSlice(s interface{}) ([]string, error) {
-	v := reflect.ValueOf(s)
-	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
-		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
-	}
-	stringSlice := make([]string, 0, v.Len())
-
-	for i := 0; i < v.Len(); i++ {
-		stringSlice = append(stringSlice, v.Index(i).String())
-	}
-	return stringSlice, nil
-}
-
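A couple of illustrative calls to String and Encode, both defined just below (the values are arbitrary):

    fmt.Println(autorest.String([]string{"a", "b"}, ",")) // a,b
    fmt.Println(autorest.Encode("query", "a b"))          // a+b
    fmt.Println(autorest.Encode("path", "a b"))           // a%20b

-// String method converts the interface v to a string. If the interface is a slice or array, it
-// joins the elements using the separator. Note that only sep[0] will be used for
-// joining if any separator is specified.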
-func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. -func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 3c6451546..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,22 +0,0 @@ -package autorest - -import "github.com/Azure/go-autorest/version" - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Version returns the semantic version (see http://semver.org). 
-func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index 756fd80ca..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,328 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. -func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. 
-	URL func(u *url.URL) string
-
-	// Header returns a potentially modified set of values for the specified key.
-	// To completely exclude the header key/values return false.
-	Header func(key string, val []string) (bool, []string)
-
-	// Body returns a potentially modified request/response body.
-	Body func(b []byte) []byte
-}
-
-func (f Filter) processURL(u *url.URL) string {
-	if f.URL == nil {
-		return u.String()
-	}
-	return f.URL(u)
-}
-
-func (f Filter) processHeader(k string, val []string) (bool, []string) {
-	if f.Header == nil {
-		return true, val
-	}
-	return f.Header(k, val)
-}
-
-func (f Filter) processBody(b []byte) []byte {
-	if f.Body == nil {
-		return b
-	}
-	return f.Body(b)
-}
-
-// Writer defines methods for writing to a logging facility.
-type Writer interface {
-	// Writeln writes the specified message with the standard log entry header and new-line character.
-	Writeln(level LevelType, message string)
-
-	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
-	Writef(level LevelType, format string, a ...interface{})
-
-	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
-	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
-	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
-	// By default no request content is excluded.
-	WriteRequest(req *http.Request, filter Filter)
-
-	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
-	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
-	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
-	// By default no response content is excluded.
-	WriteResponse(resp *http.Response, filter Filter)
-}
-
-// Instance is the default log writer initialized during package init.
-// This can be replaced with a custom implementation as required.
-var Instance Writer
-
-// default log level
-var logLevel = LogNone
-
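A hypothetical Filter that redacts the Authorization header before a request is logged; any Writer implementation receives it through WriteRequest (req is assumed to exist, and "strings" must be imported by the caller):

    redact := logger.Filter{
    	Header: func(key string, val []string) (bool, []string) {
    		if strings.EqualFold(key, "Authorization") {
    			return true, []string{"REDACTED"}
    		}
    		return true, val
    	},
    }
    logger.Instance.WriteRequest(req, redact)

-// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
-// If no value was specified the default value is LogNone.
-// Custom loggers can call this to retrieve the configured log level.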
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/version/version.go deleted file mode 100644 index 94de7c51e..000000000 --- a/vendor/github.com/Azure/go-autorest/version/version.go +++ /dev/null @@ -1,37 +0,0 @@ -package version - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -// Number contains the semantic version of this SDK. -const Number = "v10.15.5" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - Number, - ) -) - -// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index e9695ef24..10634d173 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -239,6 +239,13 @@ type Config struct { // Key: aws.String("/foo/bar/moo"), // }) EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool } // NewConfig returns a new Config pointer that can be chained with builder @@ -399,6 +406,13 @@ func (c *Config) WithEndpointDiscovery(t bool) *Config { return c } +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. 
 func (c *Config) MergeIn(cfgs ...*Config) {
 	for _, other := range cfgs {
@@ -502,6 +516,10 @@ func mergeInConfig(dst *Config, other *Config) {
 	if other.EnableEndpointDiscovery != nil {
 		dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
 	}
+
+	if other.DisableEndpointHostPrefix != nil {
+		dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+	}
 }
 
 // Copy will return a shallow copy of the Config object. If any additional
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index 7d5270298..7bc5da782 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -40,6 +40,7 @@ var throttleCodes = map[string]struct{}{
 	"RequestThrottled":         {},
 	"TooManyRequestsException": {}, // Lambda functions
 	"PriorRequestNotComplete":  {}, // Route53
+	"TransactionInProgressException": {},
 }
 
 // credsExpiredCodes is a collection of error codes which signify the credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
index bcfd947a3..8630683f3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -17,6 +17,8 @@ const (
 	ParamMinValueErrCode = "ParamMinValueError"
 	// ParamMinLenErrCode is the error code for fields without enough elements.
 	ParamMinLenErrCode = "ParamMinLenError"
+	// ParamMaxLenErrCode is the error code for value being too long.
+	ParamMaxLenErrCode = "ParamMaxLenError"
 
 	// ParamFormatErrCode is the error code for a field with invalid
 	// format or characters.
@@ -237,6 +239,29 @@ func (e *ErrParamMinLen) MinLen() int {
 	return e.min
 }
 
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
 // An ErrParamFormat represents an invalid format parameter error.
 type ErrParamFormat struct {
 	errInvalidParam
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 22b9a9727..ea1123147 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.15.83"
+const SDKVersion = "1.15.87"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
index f06f44ee1..d7d42db0a 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -1,7 +1,54 @@
 package protocol
 
-// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain
-// host label name.
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate the
+// request endpoint's host is a valid RFC 3986 host.
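The `DisableEndpointHostPrefix` option introduced in `config.go` above is consumed by the host-prefix handler added further down. A minimal sketch of how a caller might opt out when targeting a local test endpoint; the endpoint URL and region are placeholders, not part of this change:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Point the SDK at a local test server; a modeled host prefix such as
	// "{AccountId}.data." would otherwise produce an unresolvable hostname.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithEndpoint("http://localhost:8000").
		WithDisableEndpointHostPrefix(true)))
	_ = sess // hand sess to a service client as usual
}
```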
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index d5ae54bb6..647191990 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -8474,7 +8474,7 @@ type CopyObjectInput struct { ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want the copied object's Object Lock to expire. 
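To make the new `HostPrefixBuilder.Build` behavior concrete, here is a standalone sketch of its expansion step; the prefix template and label values are invented for illustration, while in the SDK they come from the service model and the operation's input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "{AccountId}.data." // hypothetical modeled prefix template
	labels := map[string]string{"AccountId": "123456789012"}

	// Same replacement loop as HostPrefixBuilder.Build: each "{name}"
	// placeholder is substituted before the prefix is prepended to the host.
	for name, value := range labels {
		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
	}

	fmt.Println(prefix + "service.us-west-2.amazonaws.com")
	// Output: 123456789012.data.service.us-west-2.amazonaws.com
}
```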
- ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp"` + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -9171,7 +9171,7 @@ type CreateMultipartUploadInput struct { ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // Specifies the date and time when you want the Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp"` + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -13138,7 +13138,7 @@ type GetObjectOutput struct { ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when this object's Object Lock will expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp"` + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -14213,7 +14213,7 @@ type HeadObjectOutput struct { ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when this object's Object Lock will expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp"` + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -20034,7 +20034,7 @@ type PutObjectInput struct { ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want this object's Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp"` + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. 
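The `timestampFormat:"iso8601"` tag added to the Object Lock fields above changes how the `x-amz-object-lock-retain-until-date` header is serialized. A hedged sketch of a caller exercising it; bucket, key, and payload are placeholders:

```go
package main

import (
	"bytes"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// With the tag change above, this field is serialized into the
	// x-amz-object-lock-retain-until-date header in ISO 8601 form
	// (e.g. 2019-01-02T15:04:05Z) rather than the previous default.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:                    aws.String("my-bucket"), // placeholder
		Key:                       aws.String("my-key"),    // placeholder
		Body:                      bytes.NewReader([]byte("payload")),
		ObjectLockMode:            aws.String("GOVERNANCE"),
		ObjectLockRetainUntilDate: aws.Time(time.Now().Add(24 * time.Hour)),
	})
	if err != nil {
		panic(err)
	}
}
```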
diff --git a/vendor/github.com/marstr/guid/LICENSE.txt b/vendor/github.com/marstr/guid/LICENSE.txt deleted file mode 100644 index e18a0841a..000000000 --- a/vendor/github.com/marstr/guid/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Martin Strobel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/marstr/guid/guid.go b/vendor/github.com/marstr/guid/guid.go deleted file mode 100644 index 51b038b75..000000000 --- a/vendor/github.com/marstr/guid/guid.go +++ /dev/null @@ -1,301 +0,0 @@ -package guid - -import ( - "bytes" - "crypto/rand" - "errors" - "fmt" - "net" - "strings" - "sync" - "time" -) - -// GUID is a unique identifier designed to virtually guarantee non-conflict between values generated -// across a distributed system. -type GUID struct { - timeHighAndVersion uint16 - timeMid uint16 - timeLow uint32 - clockSeqHighAndReserved uint8 - clockSeqLow uint8 - node [6]byte -} - -// Format enumerates the values that are supported by Parse and Format -type Format string - -// These constants define the possible string formats available via this implementation of Guid. -const ( - FormatB Format = "B" // {00000000-0000-0000-0000-000000000000} - FormatD Format = "D" // 00000000-0000-0000-0000-000000000000 - FormatN Format = "N" // 00000000000000000000000000000000 - FormatP Format = "P" // (00000000-0000-0000-0000-000000000000) - FormatX Format = "X" // {0x00000000,0x0000,0x0000,{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}} - FormatDefault Format = FormatD -) - -// CreationStrategy enumerates the values that are supported for populating the bits of a new Guid. -type CreationStrategy string - -// These constants define the possible creation strategies available via this implementation of Guid. -const ( - CreationStrategyVersion1 CreationStrategy = "version1" - CreationStrategyVersion2 CreationStrategy = "version2" - CreationStrategyVersion3 CreationStrategy = "version3" - CreationStrategyVersion4 CreationStrategy = "version4" - CreationStrategyVersion5 CreationStrategy = "version5" -) - -var emptyGUID GUID - -// NewGUID generates and returns a new globally unique identifier -func NewGUID() GUID { - result, err := version4() - if err != nil { - panic(err) //Version 4 (pseudo-random GUID) doesn't use anything that could fail. 
- } - return result -} - -var knownStrategies = map[CreationStrategy]func() (GUID, error){ - CreationStrategyVersion1: version1, - CreationStrategyVersion4: version4, -} - -// NewGUIDs generates and returns a new globally unique identifier that conforms to the given strategy. -func NewGUIDs(strategy CreationStrategy) (GUID, error) { - if creator, present := knownStrategies[strategy]; present { - result, err := creator() - return result, err - } - return emptyGUID, errors.New("Unsupported CreationStrategy") -} - -// Empty returns a copy of the default and empty GUID. -func Empty() GUID { - return emptyGUID -} - -var knownFormats = map[Format]string{ - FormatN: "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x", - FormatD: "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", - FormatB: "{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}", - FormatP: "(%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x)", - FormatX: "{0x%08x,0x%04x,0x%04x,{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x}}", -} - -// MarshalJSON writes a GUID as a JSON string. -func (guid GUID) MarshalJSON() (marshaled []byte, err error) { - buf := bytes.Buffer{} - - _, err = buf.WriteRune('"') - buf.WriteString(guid.String()) - buf.WriteRune('"') - - marshaled = buf.Bytes() - return -} - -// Parse instantiates a GUID from a text representation of the same GUID. -// This is the inverse of function family String() -func Parse(value string) (GUID, error) { - var guid GUID - for _, fullFormat := range knownFormats { - parity, err := fmt.Sscanf( - value, - fullFormat, - &guid.timeLow, - &guid.timeMid, - &guid.timeHighAndVersion, - &guid.clockSeqHighAndReserved, - &guid.clockSeqLow, - &guid.node[0], - &guid.node[1], - &guid.node[2], - &guid.node[3], - &guid.node[4], - &guid.node[5]) - if parity == 11 && err == nil { - return guid, err - } - } - return emptyGUID, fmt.Errorf("\"%s\" is not in a recognized format", value) -} - -// String returns a text representation of a GUID in the default format. -func (guid GUID) String() string { - return guid.Stringf(FormatDefault) -} - -// Stringf returns a text representation of a GUID that conforms to the specified format. -// If an unrecognized format is provided, the empty string is returned. -func (guid GUID) Stringf(format Format) string { - if format == "" { - format = FormatDefault - } - fullFormat, present := knownFormats[format] - if !present { - return "" - } - return fmt.Sprintf( - fullFormat, - guid.timeLow, - guid.timeMid, - guid.timeHighAndVersion, - guid.clockSeqHighAndReserved, - guid.clockSeqLow, - guid.node[0], - guid.node[1], - guid.node[2], - guid.node[3], - guid.node[4], - guid.node[5]) -} - -// UnmarshalJSON parses a GUID from a JSON string token. -func (guid *GUID) UnmarshalJSON(marshaled []byte) (err error) { - if len(marshaled) < 2 { - err = errors.New("JSON GUID must be surrounded by quotes") - return - } - stripped := marshaled[1 : len(marshaled)-1] - *guid, err = Parse(string(stripped)) - return -} - -// Version reads a GUID to parse which mechanism of generating GUIDS was employed. -// Values returned here are documented in rfc4122.txt. 
-func (guid GUID) Version() uint { - return uint(guid.timeHighAndVersion >> 12) -} - -var unixToGregorianOffset = time.Date(1970, 01, 01, 0, 0, 00, 0, time.UTC).Sub(time.Date(1582, 10, 15, 0, 0, 0, 0, time.UTC)) - -// getRFC4122Time returns a 60-bit count of 100-nanosecond intervals since 00:00:00.00 October 15th, 1582 -func getRFC4122Time() int64 { - currentTime := time.Now().UTC().Add(unixToGregorianOffset).UnixNano() - currentTime /= 100 - return currentTime & 0x0FFFFFFFFFFFFFFF -} - -var clockSeqVal uint16 -var clockSeqKey sync.Mutex - -func getClockSequence() (uint16, error) { - clockSeqKey.Lock() - defer clockSeqKey.Unlock() - - if 0 == clockSeqVal { - var temp [2]byte - if parity, err := rand.Read(temp[:]); !(2 == parity && nil == err) { - return 0, err - } - clockSeqVal = uint16(temp[0])<<8 | uint16(temp[1]) - } - clockSeqVal++ - return clockSeqVal, nil -} - -func getMACAddress() (mac [6]byte, err error) { - var hostNICs []net.Interface - - hostNICs, err = net.Interfaces() - if err != nil { - return - } - - for _, nic := range hostNICs { - var parity int - - parity, err = fmt.Sscanf( - strings.ToLower(nic.HardwareAddr.String()), - "%02x:%02x:%02x:%02x:%02x:%02x", - &mac[0], - &mac[1], - &mac[2], - &mac[3], - &mac[4], - &mac[5]) - - if parity == len(mac) { - return - } - } - - err = fmt.Errorf("No suitable address found") - - return -} - -func version1() (result GUID, err error) { - var localMAC [6]byte - var clockSeq uint16 - - currentTime := getRFC4122Time() - - result.timeLow = uint32(currentTime) - result.timeMid = uint16(currentTime >> 32) - result.timeHighAndVersion = uint16(currentTime >> 48) - if err = result.setVersion(1); err != nil { - return emptyGUID, err - } - - if localMAC, err = getMACAddress(); nil != err { - if parity, err := rand.Read(localMAC[:]); !(len(localMAC) != parity && err == nil) { - return emptyGUID, err - } - localMAC[0] |= 0x1 - } - copy(result.node[:], localMAC[:]) - - if clockSeq, err = getClockSequence(); nil != err { - return emptyGUID, err - } - - result.clockSeqLow = uint8(clockSeq) - result.clockSeqHighAndReserved = uint8(clockSeq >> 8) - - result.setReservedBits() - - return -} - -func version4() (GUID, error) { - var retval GUID - var bits [10]byte - - if parity, err := rand.Read(bits[:]); !(len(bits) == parity && err == nil) { - return emptyGUID, err - } - retval.timeHighAndVersion |= uint16(bits[0]) | uint16(bits[1])<<8 - retval.timeMid |= uint16(bits[2]) | uint16(bits[3])<<8 - retval.timeLow |= uint32(bits[4]) | uint32(bits[5])<<8 | uint32(bits[6])<<16 | uint32(bits[7])<<24 - retval.clockSeqHighAndReserved = uint8(bits[8]) - retval.clockSeqLow = uint8(bits[9]) - - //Randomly set clock-sequence, reserved, and node - if written, err := rand.Read(retval.node[:]); !(nil == err && written == len(retval.node)) { - retval = emptyGUID - return retval, err - } - - if err := retval.setVersion(4); nil != err { - return emptyGUID, err - } - retval.setReservedBits() - - return retval, nil -} - -func (guid *GUID) setVersion(version uint16) error { - if version > 5 || version == 0 { - return fmt.Errorf("While setting GUID version, unsupported version: %d", version) - } - guid.timeHighAndVersion = (guid.timeHighAndVersion & 0x0fff) | version<<12 - return nil -} - -func (guid *GUID) setReservedBits() { - guid.clockSeqHighAndReserved = (guid.clockSeqHighAndReserved & 0x3f) | 0x80 -} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table.go b/vendor/github.com/onsi/ginkgo/extensions/table/table.go new file mode 100644 index 000000000..ae8ab7d24 --- 
/dev/null +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table.go @@ -0,0 +1,98 @@ +/* + +Table provides a simple DSL for Ginkgo-native Table-Driven Tests + +The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests + +*/ + +package table + +import ( + "fmt" + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +DescribeTable describes a table-driven test. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +The first argument to `DescribeTable` is a string description. +The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It. +The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors. + +The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function. + +Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`. + +It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run). + +Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable. +*/ +func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, false) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, true) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. 
+*/ +func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) { + itBodyValue := reflect.ValueOf(itBody) + if itBodyValue.Kind() != reflect.Func { + panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody)) + } + + if pending { + ginkgo.PDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else if focused { + ginkgo.FDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else { + ginkgo.Describe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go new file mode 100644 index 000000000..5fa645bce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go @@ -0,0 +1,81 @@ +package table + +import ( + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + Description string + Parameters []interface{} + Pending bool + Focused bool +} + +func (t TableEntry) generateIt(itBody reflect.Value) { + if t.Pending { + ginkgo.PIt(t.Description) + return + } + + values := []reflect.Value{} + for i, param := range t.Parameters { + var value reflect.Value + + if param == nil { + inType := itBody.Type().In(i) + value = reflect.Zero(inType) + } else { + value = reflect.ValueOf(param) + } + + values = append(values, value) + } + + body := func() { + itBody.Call(values) + } + + if t.Focused { + ginkgo.FIt(t.Description, body) + } else { + ginkgo.It(t.Description, body) + } +} + +/* +Entry constructs a TableEntry. + +The first argument is a required description (this becomes the content of the generated Ginkgo `It`). +Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`. + +Each Entry ends up generating an individual Ginkgo It. +*/ +func Entry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, false} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, true} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. +*/ +func PEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. 
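A sketch of the newly vendored table extension in use, showing how `Entry` and `PEntry` map onto `It` and `PIt` (`FEntry` and `XEntry` work the same way for focus and pending). The subject under test is invented, and a bootstrapped suite file is assumed:

```go
package mypkg_test

import (
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/ginkgo/extensions/table"
	. "github.com/onsi/gomega"
)

var _ = DescribeTable("parsing durations",
	func(input string, expected string, expectedErr error) {
		d, err := time.ParseDuration(input)
		if expectedErr != nil {
			Expect(err).To(HaveOccurred())
			return
		}
		Expect(err).NotTo(HaveOccurred())
		Expect(d.String()).To(Equal(expected))
	},
	// A nil parameter reaches the body as the zero value of the declared
	// type, via generateIt's reflect.Zero handling.
	Entry("hours and minutes", "1h30m", "1h30m0s", nil),
	Entry("minutes only", "90m", "1h30m0s", nil),
	// PEntry generates a PIt: reported as pending, never run.
	PEntry("not implemented yet", "2h", "2h0m0s", nil),
)
```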
+*/ +func XEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go new file mode 100644 index 000000000..02e2b3b32 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go @@ -0,0 +1,123 @@ +package convert + +import ( + "fmt" + "go/ast" + "strings" + "unicode" +) + +/* + * Creates a func init() node + */ +func createVarUnderscoreBlock() *ast.ValueSpec { + valueSpec := &ast.ValueSpec{} + object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0} + ident := &ast.Ident{Name: "_", Obj: object} + valueSpec.Names = append(valueSpec.Names, ident) + return valueSpec +} + +/* + * Creates a Describe("Testing with ginkgo", func() { }) node + */ +func createDescribeBlock() *ast.CallExpr { + blockStatement := &ast.BlockStmt{List: []ast.Stmt{}} + + fieldList := &ast.FieldList{} + funcType := &ast.FuncType{Params: fieldList} + funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement} + basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""} + describeIdent := &ast.Ident{Name: "Describe"} + return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}} +} + +/* + * Convenience function to return the name of the *testing.T param + * for a Test function that will be rewritten. This is useful because + * we will want to replace the usage of this named *testing.T inside the + * body of the function with a GinktoT. + */ +func namedTestingTArg(node *ast.FuncDecl) string { + return node.Type.Params.List[0].Names[0].Name // *exhale* +} + +/* + * Convenience function to return the block statement node for a Describe statement + */ +func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt { + var funcLit *ast.FuncLit + var found = false + + for _, node := range desc.Args { + switch node := node.(type) { + case *ast.FuncLit: + found = true + funcLit = node + break + } + } + + if !found { + panic("Error finding ast.FuncLit inside describe statement. 
Somebody done goofed.") + } + + return funcLit.Body +} + +/* convenience function for creating an It("TestNameHere") + * with all the body of the test function inside the anonymous + * func passed to It() + */ +func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt { + blockStatement := &ast.BlockStmt{List: testFunc.Body.List} + fieldList := &ast.FieldList{} + funcType := &ast.FuncType{Params: fieldList} + funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement} + + testName := rewriteTestName(testFunc.Name.Name) + basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)} + itBlockIdent := &ast.Ident{Name: "It"} + callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}} + return &ast.ExprStmt{X: callExpr} +} + +/* +* rewrite test names to be human readable +* eg: rewrites "TestSomethingAmazing" as "something amazing" + */ +func rewriteTestName(testName string) string { + nameComponents := []string{} + currentString := "" + indexOfTest := strings.Index(testName, "Test") + if indexOfTest != 0 { + return testName + } + + testName = strings.Replace(testName, "Test", "", 1) + first, rest := testName[0], testName[1:] + testName = string(unicode.ToLower(rune(first))) + rest + + for _, rune := range testName { + if unicode.IsUpper(rune) { + nameComponents = append(nameComponents, currentString) + currentString = string(unicode.ToLower(rune)) + } else { + currentString += string(rune) + } + } + + return strings.Join(append(nameComponents, currentString), " ") +} + +func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr { + return &ast.CallExpr{ + Lparen: ident.NamePos + 1, + Rparen: ident.NamePos + 2, + Fun: &ast.Ident{Name: "GinkgoT"}, + } +} + +func newGinkgoTInterface() *ast.Ident { + return &ast.Ident{Name: "GinkgoTInterface"} +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go new file mode 100644 index 000000000..e226196f7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go @@ -0,0 +1,91 @@ +package convert + +import ( + "errors" + "fmt" + "go/ast" +) + +/* + * Given the root node of an AST, returns the node containing the + * import statements for the file. + */ +func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) { + for _, declaration := range rootNode.Decls { + decl, ok := declaration.(*ast.GenDecl) + if !ok || len(decl.Specs) == 0 { + continue + } + + _, ok = decl.Specs[0].(*ast.ImportSpec) + if ok { + imports = decl + return + } + } + + err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode)) + return +} + +/* + * Removes "testing" import, if present + */ +func removeTestingImport(rootNode *ast.File) { + importDecl, err := importsForRootNode(rootNode) + if err != nil { + panic(err.Error()) + } + + var index int + for i, importSpec := range importDecl.Specs { + importSpec := importSpec.(*ast.ImportSpec) + if importSpec.Path.Value == "\"testing\"" { + index = i + break + } + } + + importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...) 
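Taken together, the convert helpers above (`createDescribeBlock`, `createItStatementForTestFunc`, `rewriteTestName`, plus the GinkgoT rewriting below) turn a conventional Go test into a Ginkgo spec. A before/after sketch with an invented test, assuming a bootstrapped suite:

```go
package mypkg_test

import (
	. "github.com/onsi/ginkgo"
)

// Input to the converter (hypothetical):
//
//	func TestSomethingAmazing(t *testing.T) {
//		t.Log("checking the thing")
//	}
//
// Output: rewriteTestName supplies the It description, createDescribeBlock
// the "Testing with Ginkgo" wrapper, and the named *testing.T becomes GinkgoT().
var _ = Describe("Testing with Ginkgo", func() {
	It("something amazing", func() {
		GinkgoT().Log("checking the thing")
	})
})
```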
+} + +/* + * Adds import statements for onsi/ginkgo, if missing + */ +func addGinkgoImports(rootNode *ast.File) { + importDecl, err := importsForRootNode(rootNode) + if err != nil { + panic(err.Error()) + } + + if len(importDecl.Specs) == 0 { + // TODO: might need to create a import decl here + panic("unimplemented : expected to find an imports block") + } + + needsGinkgo := true + for _, importSpec := range importDecl.Specs { + importSpec, ok := importSpec.(*ast.ImportSpec) + if !ok { + continue + } + + if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" { + needsGinkgo = false + } + } + + if needsGinkgo { + importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\"")) + } +} + +/* + * convenience function to create an import statement + */ +func createImport(name, path string) *ast.ImportSpec { + return &ast.ImportSpec{ + Name: &ast.Ident{Name: name}, + Path: &ast.BasicLit{Kind: 9, Value: path}, + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go new file mode 100644 index 000000000..ed09c460d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go @@ -0,0 +1,127 @@ +package convert + +import ( + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" +) + +/* + * RewritePackage takes a name (eg: my-package/tools), finds its test files using + * Go's build package, and then rewrites them. A ginkgo test suite file will + * also be added for this package, and all of its child packages. + */ +func RewritePackage(packageName string) { + pkg, err := packageWithName(packageName) + if err != nil { + panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error())) + } + + for _, filename := range findTestsInPackage(pkg) { + rewriteTestsInFile(filename) + } + return +} + +/* + * Given a package, findTestsInPackage reads the test files in the directory, + * and then recurses on each child package, returning a slice of all test files + * found in this process. + */ +func findTestsInPackage(pkg *build.Package) (testfiles []string) { + for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) { + testfiles = append(testfiles, filepath.Join(pkg.Dir, file)) + } + + dirFiles, err := ioutil.ReadDir(pkg.Dir) + if err != nil { + panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error())) + } + + re := regexp.MustCompile(`^[._]`) + + for _, file := range dirFiles { + if !file.IsDir() { + continue + } + + if re.Match([]byte(file.Name())) { + continue + } + + packageName := filepath.Join(pkg.ImportPath, file.Name()) + subPackage, err := packageWithName(packageName) + if err != nil { + panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error())) + } + + testfiles = append(testfiles, findTestsInPackage(subPackage)...) 
+	}
+
+	addGinkgoSuiteForPackage(pkg)
+	goFmtPackage(pkg)
+	return
+}
+
+/*
+ * Shells out to `ginkgo bootstrap` to create a test suite file
+ */
+func addGinkgoSuiteForPackage(pkg *build.Package) {
+	originalDir, err := os.Getwd()
+	if err != nil {
+		panic(err)
+	}
+
+	suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
+
+	_, err = os.Stat(suite_test_file)
+	if err == nil {
+		return // test file already exists, this should be a no-op
+	}
+
+	err = os.Chdir(pkg.Dir)
+	if err != nil {
+		panic(err)
+	}
+
+	output, err := exec.Command("ginkgo", "bootstrap").Output()
+
+	if err != nil {
+		panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
+	}
+
+	err = os.Chdir(originalDir)
+	if err != nil {
+		panic(err)
+	}
+}
+
+/*
+ * Shells out to `go fmt` to format the package
+ */
+func goFmtPackage(pkg *build.Package) {
+	output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
+
+	if err != nil {
+		fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
+	}
+}
+
+/*
+ * Attempts to return a package with its test files already read.
+ * The ImportMode arg to build.Import lets you specify if you want go to read the
+ * buildable go files inside the package, but it fails if the package has no go files
+ */
+func packageWithName(name string) (pkg *build.Package, err error) {
+	pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
+	if err == nil {
+		return
+	}
+
+	pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
new file mode 100644
index 000000000..b33595c9a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
@@ -0,0 +1,56 @@
+package convert
+
+import (
+	"go/ast"
+	"regexp"
+)
+
+/*
+ * Given a root node, walks its top level statements and returns
+ * pointers to function nodes to rewrite as It statements.
+ * These functions, according to Go testing convention, must be named
+ * TestWithCamelCasedName and receive a single *testing.T argument.
+ */ +func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) { + testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+") + + ast.Inspect(rootNode, func(node ast.Node) bool { + if node == nil { + return false + } + + switch node := node.(type) { + case *ast.FuncDecl: + matches := testNameRegexp.MatchString(node.Name.Name) + + if matches && receivesTestingT(node) { + testsToRewrite = append(testsToRewrite, node) + } + } + + return true + }) + + return +} + +/* + * convenience function that looks at args to a function and determines if its + * params include an argument of type *testing.T + */ +func receivesTestingT(node *ast.FuncDecl) bool { + if len(node.Type.Params.List) != 1 { + return false + } + + base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr) + if !ok { + return false + } + + intermediate := base.X.(*ast.SelectorExpr) + isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing" + isTestingT := intermediate.Sel.Name == "T" + + return isTestingPackage && isTestingT +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go new file mode 100644 index 000000000..4b001a7db --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go @@ -0,0 +1,163 @@ +package convert + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + "os" +) + +/* + * Given a file path, rewrites any tests in the Ginkgo format. + * First, we parse the AST, and update the imports declaration. + * Then, we walk the first child elements in the file, returning tests to rewrite. + * A top level init func is declared, with a single Describe func inside. + * Then the test functions to rewrite are inserted as It statements inside the Describe. + * Finally we walk the rest of the file, replacing other usages of *testing.T + * Once that is complete, we write the AST back out again to its file. + */ +func rewriteTestsInFile(pathToFile string) { + fileSet := token.NewFileSet() + rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0) + if err != nil { + panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error())) + } + + addGinkgoImports(rootNode) + removeTestingImport(rootNode) + + varUnderscoreBlock := createVarUnderscoreBlock() + describeBlock := createDescribeBlock() + varUnderscoreBlock.Values = []ast.Expr{describeBlock} + + for _, testFunc := range findTestFuncs(rootNode) { + rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock) + } + + underscoreDecl := &ast.GenDecl{ + Tok: 85, // gah, magick numbers are needed to make this work + TokPos: 14, // this tricks Go into writing "var _ = Describe" + Specs: []ast.Spec{varUnderscoreBlock}, + } + + imports := rootNode.Decls[0] + tail := rootNode.Decls[1:] + rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...) 
+	rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
+	walkNodesInRootNodeReplacingTestingT(rootNode)
+
+	var buffer bytes.Buffer
+	if err = format.Node(&buffer, fileSet, rootNode); err != nil {
+		panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
+	}
+
+	fileInfo, err := os.Stat(pathToFile)
+	if err != nil {
+		panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
+	}
+
+	ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
+	return
+}
+
+/*
+ * Given a test func named TestDoesSomethingNeat, rewrites it as
+ * It("does something neat", func() { __test_body_here__ }) and adds it
+ * to the Describe's list of statements
+ */
+func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
+	var funcIndex int = -1
+	for index, child := range rootNode.Decls {
+		if child == testFunc {
+			funcIndex = index
+			break
+		}
+	}
+
+	if funcIndex < 0 {
+		panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
+	}
+
+	var block *ast.BlockStmt = blockStatementFromDescribe(describe)
+	block.List = append(block.List, createItStatementForTestFunc(testFunc))
+	replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
+
+	// remove the old test func from the root node's declarations
+	rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
+	return
+}
+
+/*
+ * walks nodes inside of a test func's statements and replaces the usage of
+ * its named *testing.T param with GinkgoT's
+ */
+func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
+	ast.Inspect(statementsBlock, func(node ast.Node) bool {
+		if node == nil {
+			return false
+		}
+
+		keyValueExpr, ok := node.(*ast.KeyValueExpr)
+		if ok {
+			replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
+			return true
+		}
+
+		funcLiteral, ok := node.(*ast.FuncLit)
+		if ok {
+			replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
+			return true
+		}
+
+		callExpr, ok := node.(*ast.CallExpr)
+		if !ok {
+			return true
+		}
+		replaceTestingTsInArgsLists(callExpr, testingT)
+
+		funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
+		if ok {
+			replaceTestingTsMethodCalls(funCall, testingT)
+		}
+
+		return true
+	})
+}
+
+/*
+ * rewrite t.Fail() or any other *testing.T method by replacing with GinkgoT().Fail()
+ * This function receives a selector expression (eg: t.Fail()) and
+ * the name of the *testing.T param from the function declaration.
Rewrites the + * selector expression in place if the target was a *testing.T + */ +func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) { + ident, ok := selectorExpr.X.(*ast.Ident) + if !ok { + return + } + + if ident.Name == testingT { + selectorExpr.X = newGinkgoTFromIdent(ident) + } +} + +/* + * replaces usages of a named *testing.T param inside of a call expression + * with a new GinkgoT object + */ +func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) { + for index, arg := range callExpr.Args { + ident, ok := arg.(*ast.Ident) + if !ok { + continue + } + + if ident.Name == testingT { + callExpr.Args[index] = newGinkgoTFromIdent(ident) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go new file mode 100644 index 000000000..418cdc4e5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go @@ -0,0 +1,130 @@ +package convert + +import ( + "go/ast" +) + +/* + * Rewrites any other top level funcs that receive a *testing.T param + */ +func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) { + for _, decl := range declarations { + decl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + + for _, param := range decl.Type.Params.List { + starExpr, ok := param.Type.(*ast.StarExpr) + if !ok { + continue + } + + selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) + if !ok { + continue + } + + xIdent, ok := selectorExpr.X.(*ast.Ident) + if !ok || xIdent.Name != "testing" { + continue + } + + if selectorExpr.Sel.Name != "T" { + continue + } + + param.Type = newGinkgoTInterface() + } + } +} + +/* + * Walks all of the nodes in the file, replacing *testing.T in struct + * and func literal nodes. 
eg: + * type foo struct { *testing.T } + * var bar = func(t *testing.T) { } + */ +func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) { + ast.Inspect(rootNode, func(node ast.Node) bool { + if node == nil { + return false + } + + switch node := node.(type) { + case *ast.StructType: + replaceTestingTsInStructType(node) + case *ast.FuncLit: + replaceTypeDeclTestingTsInFuncLiteral(node) + } + + return true + }) +} + +/* + * replaces named *testing.T inside a composite literal + */ +func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) { + ident, ok := kve.Value.(*ast.Ident) + if !ok { + return + } + + if ident.Name == testingT { + kve.Value = newGinkgoTFromIdent(ident) + } +} + +/* + * replaces *testing.T params in a func literal with GinkgoT + */ +func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) { + for _, arg := range functionLiteral.Type.Params.List { + starExpr, ok := arg.Type.(*ast.StarExpr) + if !ok { + continue + } + + selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) + if !ok { + continue + } + + target, ok := selectorExpr.X.(*ast.Ident) + if !ok { + continue + } + + if target.Name == "testing" && selectorExpr.Sel.Name == "T" { + arg.Type = newGinkgoTInterface() + } + } +} + +/* + * Replaces *testing.T types inside of a struct declaration with a GinkgoT + * eg: type foo struct { *testing.T } + */ +func replaceTestingTsInStructType(structType *ast.StructType) { + for _, field := range structType.Fields.List { + starExpr, ok := field.Type.(*ast.StarExpr) + if !ok { + continue + } + + selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) + if !ok { + continue + } + + xIdent, ok := selectorExpr.X.(*ast.Ident) + if !ok { + continue + } + + if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" { + field.Type = newGinkgoTInterface() + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go new file mode 100644 index 000000000..c15db0b02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go @@ -0,0 +1,52 @@ +package interrupthandler + +import ( + "os" + "os/signal" + "sync" + "syscall" +) + +type InterruptHandler struct { + interruptCount int + lock *sync.Mutex + C chan bool +} + +func NewInterruptHandler() *InterruptHandler { + h := &InterruptHandler{ + lock: &sync.Mutex{}, + C: make(chan bool, 0), + } + + go h.handleInterrupt() + SwallowSigQuit() + + return h +} + +func (h *InterruptHandler) WasInterrupted() bool { + h.lock.Lock() + defer h.lock.Unlock() + + return h.interruptCount > 0 +} + +func (h *InterruptHandler) handleInterrupt() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + signal.Stop(c) + + h.lock.Lock() + h.interruptCount++ + if h.interruptCount == 1 { + close(h.C) + } else if h.interruptCount > 5 { + os.Exit(1) + } + h.lock.Unlock() + + go h.handleInterrupt() +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go new file mode 100644 index 000000000..43c18544a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go @@ -0,0 +1,14 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupthandler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + 
signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go new file mode 100644 index 000000000..7f4a50e19 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package interrupthandler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go new file mode 100644 index 000000000..3f7237c60 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go @@ -0,0 +1,194 @@ +package nodot + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "path/filepath" + "strings" +) + +func ApplyNoDot(data []byte) ([]byte, error) { + sections, err := generateNodotSections() + if err != nil { + return nil, err + } + + for _, section := range sections { + data = section.createOrUpdateIn(data) + } + + return data, nil +} + +type nodotSection struct { + name string + pkg string + declarations []string + types []string +} + +func (s nodotSection) createOrUpdateIn(data []byte) []byte { + renames := map[string]string{} + + contents := string(data) + + lines := strings.Split(contents, "\n") + + comment := "// Declarations for " + s.name + + newLines := []string{} + for _, line := range lines { + if line == comment { + continue + } + + words := strings.Split(line, " ") + lastWord := words[len(words)-1] + + if s.containsDeclarationOrType(lastWord) { + renames[lastWord] = words[1] + continue + } + + newLines = append(newLines, line) + } + + if len(newLines[len(newLines)-1]) > 0 { + newLines = append(newLines, "") + } + + newLines = append(newLines, comment) + + for _, typ := range s.types { + name, ok := renames[s.prefix(typ)] + if !ok { + name = typ + } + newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ))) + } + + for _, decl := range s.declarations { + name, ok := renames[s.prefix(decl)] + if !ok { + name = decl + } + newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl))) + } + + newLines = append(newLines, "") + + newContents := strings.Join(newLines, "\n") + + return []byte(newContents) +} + +func (s nodotSection) prefix(declOrType string) string { + return s.pkg + "." 
+ declOrType +} + +func (s nodotSection) containsDeclarationOrType(word string) bool { + for _, declaration := range s.declarations { + if s.prefix(declaration) == word { + return true + } + } + + for _, typ := range s.types { + if s.prefix(typ) == word { + return true + } + } + + return false +} + +func generateNodotSections() ([]nodotSection, error) { + sections := []nodotSection{} + + declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC") + if err != nil { + return nil, err + } + sections = append(sections, nodotSection{ + name: "Ginkgo DSL", + pkg: "ginkgo", + declarations: declarations, + types: []string{"Done", "Benchmarker"}, + }) + + declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION") + if err != nil { + return nil, err + } + sections = append(sections, nodotSection{ + name: "Gomega DSL", + pkg: "gomega", + declarations: declarations, + }) + + declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go") + if err != nil { + return nil, err + } + sections = append(sections, nodotSection{ + name: "Gomega Matchers", + pkg: "gomega", + declarations: declarations, + }) + + return sections, nil +} + +func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) { + pkg, err := build.Import(pkgPath, ".", 0) + if err != nil { + return []string{}, err + } + + declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename)) + if err != nil { + return []string{}, err + } + + blacklistLookup := map[string]bool{} + for _, declaration := range blacklist { + blacklistLookup[declaration] = true + } + + filteredDeclarations := []string{} + for _, declaration := range declarations { + if blacklistLookup[declaration] { + continue + } + filteredDeclarations = append(filteredDeclarations, declaration) + } + + return filteredDeclarations, nil +} + +func getExportedDeclarationsForFile(path string) ([]string, error) { + fset := token.NewFileSet() + tree, err := parser.ParseFile(fset, path, nil, 0) + if err != nil { + return []string{}, err + } + + declarations := []string{} + ast.FileExports(tree) + for _, decl := range tree.Decls { + switch x := decl.(type) { + case *ast.GenDecl: + switch s := x.Specs[0].(type) { + case *ast.ValueSpec: + declarations = append(declarations, s.Names[0].Name) + } + case *ast.FuncDecl: + declarations = append(declarations, x.Name.Name) + } + } + + return declarations, nil +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go new file mode 100644 index 000000000..3b1a238c2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go @@ -0,0 +1,7 @@ +// +build go1.10 + +package testrunner + +var ( + buildArgs = []string{"test", "-c"} +) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go new file mode 100644 index 000000000..14d70dbcc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go @@ -0,0 +1,7 @@ +// +build !go1.10 + +package testrunner + +var ( + buildArgs = []string{"test", "-c", "-i"} +) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go new file mode 100644 index 000000000..a73a6e379 --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go @@ -0,0 +1,52 @@ +package testrunner + +import ( + "bytes" + "fmt" + "io" + "log" + "strings" + "sync" +) + +type logWriter struct { + buffer *bytes.Buffer + lock *sync.Mutex + log *log.Logger +} + +func newLogWriter(target io.Writer, node int) *logWriter { + return &logWriter{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + log: log.New(target, fmt.Sprintf("[%d] ", node), 0), + } +} + +func (w *logWriter) Write(data []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + w.buffer.Write(data) + contents := w.buffer.String() + + lines := strings.Split(contents, "\n") + for _, line := range lines[0 : len(lines)-1] { + w.log.Println(line) + } + + w.buffer.Reset() + w.buffer.Write([]byte(lines[len(lines)-1])) + return len(data), nil +} + +func (w *logWriter) Close() error { + w.lock.Lock() + defer w.lock.Unlock() + + if w.buffer.Len() > 0 { + w.log.Println(w.buffer.String()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go new file mode 100644 index 000000000..5d472acb8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go @@ -0,0 +1,27 @@ +package testrunner + +type RunResult struct { + Passed bool + HasProgrammaticFocus bool +} + +func PassingRunResult() RunResult { + return RunResult{ + Passed: true, + HasProgrammaticFocus: false, + } +} + +func FailingRunResult() RunResult { + return RunResult{ + Passed: false, + HasProgrammaticFocus: false, + } +} + +func (r RunResult) Merge(o RunResult) RunResult { + return RunResult{ + Passed: r.Passed && o.Passed, + HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus, + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go new file mode 100644 index 000000000..a0113e136 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go @@ -0,0 +1,556 @@ +package testrunner + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/ginkgo/testsuite" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" + "github.com/onsi/ginkgo/types" +) + +type TestRunner struct { + Suite testsuite.TestSuite + + compiled bool + compilationTargetPath string + + numCPU int + parallelStream bool + timeout time.Duration + goOpts map[string]interface{} + additionalArgs []string + stderr *bytes.Buffer + + CoverageFile string +} + +func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { + runner := &TestRunner{ + Suite: suite, + numCPU: numCPU, + parallelStream: parallelStream, + goOpts: goOpts, + additionalArgs: additionalArgs, + timeout: timeout, + stderr: new(bytes.Buffer), + } + + if !suite.Precompiled { + dir, err := ioutil.TempDir("", "ginkgo") + if err != nil { + panic(fmt.Sprintf("couldn't create temporary directory... 
might be time to rm -rf:\n%s", err.Error()))
+		}
+		runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+	}
+
+	return runner
+}
+
+func (t *TestRunner) Compile() error {
+	return t.CompileTo(t.compilationTargetPath)
+}
+
+func (t *TestRunner) BuildArgs(path string) []string {
+	args := make([]string, len(buildArgs), len(buildArgs)+3)
+	copy(args, buildArgs)
+	args = append(args, "-o", path, t.Suite.Path)
+
+	if t.getCoverMode() != "" {
+		args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
+	} else {
+		if t.shouldCover() || t.getCoverPackage() != "" {
+			args = append(args, "-cover", "-covermode=atomic")
+		}
+	}
+
+	boolOpts := []string{
+		"a",
+		"n",
+		"msan",
+		"race",
+		"x",
+		"work",
+		"linkshared",
+	}
+
+	for _, opt := range boolOpts {
+		if s, found := t.goOpts[opt].(*bool); found && *s {
+			args = append(args, fmt.Sprintf("-%s", opt))
+		}
+	}
+
+	intOpts := []string{
+		"memprofilerate",
+		"blockprofilerate",
+	}
+
+	for _, opt := range intOpts {
+		if s, found := t.goOpts[opt].(*int); found {
+			args = append(args, fmt.Sprintf("-%s=%d", opt, *s))
+		}
+	}
+
+	stringOpts := []string{
+		"asmflags",
+		"buildmode",
+		"compiler",
+		"gccgoflags",
+		"installsuffix",
+		"ldflags",
+		"pkgdir",
+		"toolexec",
+		"coverprofile",
+		"cpuprofile",
+		"memprofile",
+		"outputdir",
+		"coverpkg",
+		"tags",
+		"gcflags",
+	}
+
+	for _, opt := range stringOpts {
+		if s, found := t.goOpts[opt].(*string); found && *s != "" {
+			args = append(args, fmt.Sprintf("-%s=%s", opt, *s))
+		}
+	}
+	return args
+}
+
+func (t *TestRunner) CompileTo(path string) error {
+	if t.compiled {
+		return nil
+	}
+
+	if t.Suite.Precompiled {
+		return nil
+	}
+
+	args := t.BuildArgs(path)
+	cmd := exec.Command("go", args...)
+
+	output, err := cmd.CombinedOutput()
+
+	if err != nil {
+		if len(output) > 0 {
+			return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output)
+		}
+		return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
+	}
+
+	if len(output) > 0 {
+		fmt.Println(string(output))
+	}
+
+	if fileExists(path) == false {
+		compiledFile := t.Suite.PackageName + ".test"
+		if fileExists(compiledFile) {
+			// seems like we are on an old go version that does not support the -o flag on go test
+			// move the compiled test file to the desired location by hand
+			err = os.Rename(compiledFile, path)
+			if err != nil {
+				// We cannot move the file, perhaps because the source and destination
+				// are on different partitions. We can copy the file, however.
+				err = copyFile(compiledFile, path)
+				if err != nil {
+					return fmt.Errorf("Failed to copy compiled file: %s", err)
+				}
+			}
+		} else {
+			return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
+		}
+	}
+
+	t.compiled = true
+
+	return nil
+}
+
+func fileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil || os.IsNotExist(err) == false
+}
+
+// copyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file.
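Stepping back to `BuildArgs` above: the `goOpts` map is expected to hold pointer values, matching its `*bool`/`*int`/`*string` type assertions. A small sketch of that shape; the keys and values here are invented for illustration:

```go
package main

import "fmt"

func main() {
	race := true
	tags := "integration"

	// Keys mirror go build flags from boolOpts/intOpts/stringOpts;
	// values are pointers so the type assertions in BuildArgs succeed.
	goOpts := map[string]interface{}{
		"race": &race, // appended as -race
		"tags": &tags, // appended as -tags=integration
	}

	// On go >= 1.10 the resulting invocation then looks roughly like:
	//   go test -c -o /tmp/mypkg.test ./mypkg -race -tags=integration
	fmt.Println(len(goOpts), "options set")
}
```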
+func copyFile(src, dst string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + mode := srcInfo.Mode() + + in, err := os.Open(src) + if err != nil { + return err + } + + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + + defer func() { + closeErr := out.Close() + if err == nil { + err = closeErr + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return err + } + + err = out.Sync() + if err != nil { + return err + } + + return out.Chmod(mode) +} + +func (t *TestRunner) Run() RunResult { + if t.Suite.IsGinkgo { + if t.numCPU > 1 { + if t.parallelStream { + return t.runAndStreamParallelGinkgoSuite() + } else { + return t.runParallelGinkgoSuite() + } + } else { + return t.runSerialGinkgoSuite() + } + } else { + return t.runGoTestSuite() + } +} + +func (t *TestRunner) CleanUp() { + if t.Suite.Precompiled { + return + } + os.RemoveAll(filepath.Dir(t.compilationTargetPath)) +} + +func (t *TestRunner) runSerialGinkgoSuite() RunResult { + ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) + return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil) +} + +func (t *TestRunner) runGoTestSuite() RunResult { + return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil) +} + +func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { + completions := make(chan RunResult) + writers := make([]*logWriter, t.numCPU) + + server, err := remote.NewServer(t.numCPU) + if err != nil { + panic("Failed to start parallel spec server") + } + + server.Start() + defer server.Close() + + for cpu := 0; cpu < t.numCPU; cpu++ { + config.GinkgoConfig.ParallelNode = cpu + 1 + config.GinkgoConfig.ParallelTotal = t.numCPU + config.GinkgoConfig.SyncHost = server.Address() + + ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) + + writers[cpu] = newLogWriter(os.Stdout, cpu+1) + + cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1) + + server.RegisterAlive(cpu+1, func() bool { + if cmd.ProcessState == nil { + return true + } + return !cmd.ProcessState.Exited() + }) + + go t.run(cmd, completions) + } + + res := PassingRunResult() + + for cpu := 0; cpu < t.numCPU; cpu++ { + res = res.Merge(<-completions) + } + + for _, writer := range writers { + writer.Close() + } + + os.Stdout.Sync() + + if t.shouldCombineCoverprofiles() { + t.combineCoverprofiles() + } + + return res +} + +func (t *TestRunner) runParallelGinkgoSuite() RunResult { + result := make(chan bool) + completions := make(chan RunResult) + writers := make([]*logWriter, t.numCPU) + reports := make([]*bytes.Buffer, t.numCPU) + + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) + aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer) + + server, err := remote.NewServer(t.numCPU) + if err != nil { + panic("Failed to start parallel spec server") + } + server.RegisterReporters(aggregator) + server.Start() + defer server.Close() + + for cpu := 0; cpu < t.numCPU; cpu++ { + config.GinkgoConfig.ParallelNode = cpu + 1 + config.GinkgoConfig.ParallelTotal = t.numCPU + config.GinkgoConfig.SyncHost = server.Address() + config.GinkgoConfig.StreamHost = server.Address() + + ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) + + reports[cpu] = &bytes.Buffer{} + writers[cpu] = newLogWriter(reports[cpu], cpu+1) + + cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1) 
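+		// The server polls this callback: a node counts as alive until its
+		// process has been started and has exited.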
+ + server.RegisterAlive(cpu+1, func() bool { + if cmd.ProcessState == nil { + return true + } + return !cmd.ProcessState.Exited() + }) + + go t.run(cmd, completions) + } + + res := PassingRunResult() + + for cpu := 0; cpu < t.numCPU; cpu++ { + res = res.Merge(<-completions) + } + + //all test processes are done, at this point + //we should be able to wait for the aggregator to tell us that it's done + + select { + case <-result: + fmt.Println("") + case <-time.After(time.Second): + //the aggregator never got back to us! something must have gone wrong + fmt.Println(` + ------------------------------------------------------------------- + | | + | Ginkgo timed out waiting for all parallel nodes to report back! | + | | + -------------------------------------------------------------------`) + fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path) + os.Stdout.Sync() + + for _, writer := range writers { + writer.Close() + } + + for _, report := range reports { + fmt.Print(report.String()) + } + + os.Stdout.Sync() + } + + if t.shouldCombineCoverprofiles() { + t.combineCoverprofiles() + } + + return res +} + +const CoverProfileSuffix = ".coverprofile" + +func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd { + args := []string{"--test.timeout=" + t.timeout.String()} + + coverProfile := t.getCoverProfile() + + if t.shouldCombineCoverprofiles() { + + testCoverProfile := "--test.coverprofile=" + + coverageFile := "" + // Set default name for coverage results + if coverProfile == "" { + coverageFile = t.Suite.PackageName + CoverProfileSuffix + } else { + coverageFile = coverProfile + } + + testCoverProfile += coverageFile + + t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile) + + if t.numCPU > 1 { + testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node) + } + args = append(args, testCoverProfile) + } + + args = append(args, ginkgoArgs...) + args = append(args, t.additionalArgs...) + + path := t.compilationTargetPath + if t.Suite.Precompiled { + path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName))) + } + + cmd := exec.Command(path, args...) 
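+	// Run the binary from the suite's own directory so relative paths and the
+	// generated coverprofile resolve inside the package under test.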
+ + cmd.Dir = t.Suite.Path + cmd.Stderr = io.MultiWriter(stream, t.stderr) + cmd.Stdout = stream + + return cmd +} + +func (t *TestRunner) shouldCover() bool { + return *t.goOpts["cover"].(*bool) +} + +func (t *TestRunner) shouldRequireSuite() bool { + return *t.goOpts["requireSuite"].(*bool) +} + +func (t *TestRunner) getCoverProfile() string { + return *t.goOpts["coverprofile"].(*string) +} + +func (t *TestRunner) getCoverPackage() string { + return *t.goOpts["coverpkg"].(*string) +} + +func (t *TestRunner) getCoverMode() string { + return *t.goOpts["covermode"].(*string) +} + +func (t *TestRunner) shouldCombineCoverprofiles() bool { + return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != "" +} + +func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult { + var res RunResult + + defer func() { + if completions != nil { + completions <- res + } + }() + + err := cmd.Start() + if err != nil { + fmt.Printf("Failed to run test suite!\n\t%s", err.Error()) + return res + } + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + + if strings.Contains(t.stderr.String(), "warning: no tests to run") { + if t.shouldRequireSuite() { + res.Passed = false + } + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + } + + return res +} + +func (t *TestRunner) combineCoverprofiles() { + profiles := []string{} + + coverProfile := t.getCoverProfile() + + for cpu := 1; cpu <= t.numCPU; cpu++ { + var coverFile string + if coverProfile == "" { + coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu) + } else { + coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu) + } + + coverFile = filepath.Join(t.Suite.Path, coverFile) + coverProfile, err := ioutil.ReadFile(coverFile) + os.Remove(coverFile) + + if err == nil { + profiles = append(profiles, string(coverProfile)) + } + } + + if len(profiles) != t.numCPU { + return + } + + lines := map[string]int{} + lineOrder := []string{} + for i, coverProfile := range profiles { + for _, line := range strings.Split(string(coverProfile), "\n")[1:] { + if len(line) == 0 { + continue + } + components := strings.Split(line, " ") + count, _ := strconv.Atoi(components[len(components)-1]) + prefix := strings.Join(components[0:len(components)-1], " ") + lines[prefix] += count + if i == 0 { + lineOrder = append(lineOrder, prefix) + } + } + } + + output := []string{"mode: atomic"} + for _, line := range lineOrder { + output = append(output, fmt.Sprintf("%s %d", line, lines[line])) + } + finalOutput := strings.Join(output, "\n") + + finalFilename := "" + + if coverProfile != "" { + finalFilename = coverProfile + } else { + finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix) + } + + coverageFilepath := filepath.Join(t.Suite.Path, finalFilename) + ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666) + + t.CoverageFile = coverageFilepath +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go new file mode 100644 index 000000000..9de8c2bb4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go @@ -0,0 +1,115 @@ +package testsuite + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" +) + +type TestSuite struct { + Path string 
+ PackageName string + IsGinkgo bool + Precompiled bool +} + +func PrecompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + }, nil +} + +func SuitesInDir(dir string, recurse bool) []TestSuite { + suites := []TestSuite{} + + if vendorExperimentCheck(dir) { + return suites + } + + files, _ := ioutil.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suites = append(suites, New(dir, files)) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." + string(filepath.Separator) + dir + } + + return dir +} + +func New(dir string, files []os.FileInfo) TestSuite { + return TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + } +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := ioutil.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go new file mode 100644 index 000000000..75f827a12 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go @@ -0,0 +1,16 @@ +// +build !go1.6 + +package testsuite + +import ( + "os" + "path" +) + +// "This change will only be enabled if the go command is run with +// GO15VENDOREXPERIMENT=1 in its environment." +// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC +func vendorExperimentCheck(dir string) bool { + vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT") + return vendorExperiment == "1" && path.Base(dir) == "vendor" +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go new file mode 100644 index 000000000..596e5e5c1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go @@ -0,0 +1,15 @@ +// +build go1.6 + +package testsuite + +import ( + "os" + "path" +) + +// in 1.6 the vendor directory became the default go behaviour, so now +// check if its disabled. 
+func vendorExperimentCheck(dir string) bool { + vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT") + return vendorExperiment != "0" && path.Base(dir) == "vendor" +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go new file mode 100644 index 000000000..6c485c5b1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go new file mode 100644 index 000000000..a628303d7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/ginkgo/testsuite" +) + +type SuiteErrors map[testsuite.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go new file mode 100644 index 000000000..82c25face --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go @@ -0,0 +1,91 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + 
if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go new file mode 100644 index 000000000..7e1e4192d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go @@ -0,0 +1,104 @@ +package watch + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if p.Deleted == false { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + infos, err := ioutil.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, info := range infos { + if info.IsDir() { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += 
codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go new file mode 100644 index 000000000..b4892bebf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go new file mode 100644 index 000000000..5deaba7cb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/ginkgo/testsuite" +) + +type Suite struct { + Suite testsuite.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + 
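+	// Mirror NewSuite: re-register the suite and its recomputed dependencies
+	// so the shared hash tracker keeps watching every package it touches.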
s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go new file mode 100644 index 000000000..336086f4a --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go @@ -0,0 +1,245 @@ +/* +Package gbytes provides a buffer that supports incrementally detecting input. + +You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fastforwards the buffer's read cursor to the end of that match. + +Subsequent matches against the buffer will only operate against data that appears *after* the read cursor. + +The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always +access the entire buffer's contents with Contents(). + +*/ +package gbytes + +import ( + "errors" + "fmt" + "io" + "regexp" + "sync" + "time" +) + +/* +gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher. + +You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code! +*/ +type Buffer struct { + contents []byte + readCursor uint64 + lock *sync.Mutex + detectCloser chan interface{} + closed bool +} + +/* +NewBuffer returns a new gbytes.Buffer +*/ +func NewBuffer() *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + } +} + +/* +BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes +*/ +func BufferWithBytes(bytes []byte) *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + contents: bytes, + } +} + +/* +BufferReader returns a new gbytes.Buffer that wraps a reader. The reader's contents are read into +the Buffer via io.Copy +*/ +func BufferReader(reader io.Reader) *Buffer { + b := &Buffer{ + lock: &sync.Mutex{}, + } + + go func() { + io.Copy(b, reader) + b.Close() + }() + + return b +} + +/* +Write implements the io.Writer interface +*/ +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to write to closed buffer") + } + + b.contents = append(b.contents, p...) + return len(p), nil +} + +/* +Read implements the io.Reader interface. It advances the +cursor as it reads. + +Returns an error if called after Close. 
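+Mixing Read with the Say matcher is rarely what you want, as both consume
+from the same read cursor.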
+*/ +func (b *Buffer) Read(d []byte) (int, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to read from closed buffer") + } + + if uint64(len(b.contents)) <= b.readCursor { + return 0, io.EOF + } + + n := copy(d, b.contents[b.readCursor:]) + b.readCursor += uint64(n) + + return n, nil +} + +/* +Close signifies that the buffer will no longer be written to +*/ +func (b *Buffer) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + b.closed = true + + return nil +} + +/* +Closed returns true if the buffer has been closed +*/ +func (b *Buffer) Closed() bool { + b.lock.Lock() + defer b.lock.Unlock() + + return b.closed +} + +/* +Contents returns all data ever written to the buffer. +*/ +func (b *Buffer) Contents() []byte { + b.lock.Lock() + defer b.lock.Unlock() + + contents := make([]byte, len(b.contents)) + copy(contents, b.contents) + return contents +} + +/* +Detect takes a regular expression and returns a channel. + +The channel will receive true the first time data matching the regular expression is written to the buffer. +The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region. + +You typically don't need to use Detect and should use the ghttp.Say matcher instead. Detect is useful, however, in cases where your code must +be branch and handle different outputs written to the buffer. + +For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library. + +You could do something like: + +select { +case <-buffer.Detect("You are not logged in"): + //log in +case <-buffer.Detect("Success"): + //carry on +case <-time.After(time.Second): + //welp +} +buffer.CancelDetects() + +You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. + +Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. +*/ +func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { + formattedRegexp := desired + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(desired, args...) + } + re := regexp.MustCompile(formattedRegexp) + + b.lock.Lock() + defer b.lock.Unlock() + + if b.detectCloser == nil { + b.detectCloser = make(chan interface{}) + } + + closer := b.detectCloser + response := make(chan bool) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + defer close(response) + for { + select { + case <-ticker.C: + b.lock.Lock() + data, cursor := b.contents[b.readCursor:], b.readCursor + loc := re.FindIndex(data) + b.lock.Unlock() + + if loc != nil { + response <- true + b.lock.Lock() + newCursorPosition := cursor + uint64(loc[1]) + if newCursorPosition >= b.readCursor { + b.readCursor = newCursorPosition + } + b.lock.Unlock() + return + } + case <-closer: + return + } + } + }() + + return response +} + +/* +CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels. 
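+It is safe to call Detect again afterwards; a fresh closer channel is created
+on first use.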
+*/ +func (b *Buffer) CancelDetects() { + b.lock.Lock() + defer b.lock.Unlock() + + close(b.detectCloser) + b.detectCloser = nil +} + +func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + unreadBytes := b.contents[b.readCursor:] + copyOfUnreadBytes := make([]byte, len(unreadBytes)) + copy(copyOfUnreadBytes, unreadBytes) + + loc := re.FindIndex(unreadBytes) + + if loc != nil { + b.readCursor += uint64(loc[1]) + return true, copyOfUnreadBytes + } + return false, copyOfUnreadBytes +} diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go new file mode 100644 index 000000000..3caed8769 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go @@ -0,0 +1,85 @@ +package gbytes + +import ( + "errors" + "io" + "time" +) + +// ErrTimeout is returned by TimeoutCloser, TimeoutReader, and TimeoutWriter when the underlying Closer/Reader/Writer does not return within the specified timeout +var ErrTimeout = errors.New("timeout occurred") + +// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the alloted timeout ErrTimeout is returned. +func TimeoutCloser(c io.Closer, timeout time.Duration) io.Closer { + return timeoutReaderWriterCloser{c: c, d: timeout} +} + +// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the alloted timeout ErrTimeout is returned. +func TimeoutReader(r io.Reader, timeout time.Duration) io.Reader { + return timeoutReaderWriterCloser{r: r, d: timeout} +} + +// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the alloted timeout ErrTimeout is returned. +func TimeoutWriter(w io.Writer, timeout time.Duration) io.Writer { + return timeoutReaderWriterCloser{w: w, d: timeout} +} + +type timeoutReaderWriterCloser struct { + c io.Closer + w io.Writer + r io.Reader + d time.Duration +} + +func (t timeoutReaderWriterCloser) Close() error { + done := make(chan struct{}) + var err error + + go func() { + err = t.c.Close() + close(done) + }() + + select { + case <-done: + return err + case <-time.After(t.d): + return ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Read(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.r.Read(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Write(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.w.Write(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go new file mode 100644 index 000000000..14317182b --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -0,0 +1,104 @@ +package gbytes + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +//Objects satisfying the BufferProvider can be used with the Say matcher. +type BufferProvider interface { + Buffer() *Buffer +} + +/* +Say is a Gomega matcher that operates on gbytes.Buffers: + + Expect(buffer).Should(Say("something")) + +will succeed if the unread portion of the buffer matches the regular expression "something". 
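+Say accepts an optional set of format arguments; the expected string is built
+with fmt.Sprintf before being compiled as a regular expression:
+
+	Expect(buffer).Should(Say("user-%d logged in", 42))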
+ +When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match. +Thus, subsequent calls to Say will only match against the unread portion of the buffer + +Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: + + Eventually(buffer, 3).Should(Say("[123]-star")) + +Ditto with consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can: + + Consistently(buffer, 1).ShouldNot(Say("never-see-this")) + +In addition to bytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface. +In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() + +If the buffer is closed, the Say matcher will tell Eventually to abort. +*/ +func Say(expected string, args ...interface{}) *sayMatcher { + if len(args) > 0 { + expected = fmt.Sprintf(expected, args...) + } + return &sayMatcher{ + re: regexp.MustCompile(expected), + } +} + +type sayMatcher struct { + re *regexp.Regexp + receivedSayings []byte +} + +func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { + var buffer *Buffer + + switch x := actual.(type) { + case *Buffer: + buffer = x + case BufferProvider: + buffer = x.Buffer() + default: + return nil, false + } + + return buffer, true +} + +func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { + buffer, ok := m.buffer(actual) + if !ok { + return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1)) + } + + didSay, sayings := buffer.didSay(m.re) + m.receivedSayings = sayings + + return didSay, nil +} + +func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Got stuck at:\n%s\nWaiting for:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Saw:\n%s\nWhich matches the unexpected:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + switch x := actual.(type) { + case *Buffer: + return !x.Closed() + case BufferProvider: + return !x.Buffer().Closed() + default: + return true + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go new file mode 100644 index 000000000..869c1ead8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -0,0 +1,112 @@ +package gexec + +import ( + "errors" + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "sync" +) + +var ( + mu sync.Mutex + tmpDir string +) + +/* +Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. +A path pointing to this binary is returned. + +Build uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+, +it will use the default GOPATH instead. It passes the variadic args on to `go build`. +*/ +func Build(packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, nil, args...) +} + +/* +BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. 
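+The extra variables are appended after GOPATH has been injected, so they are
+layered on top of the inherited environment.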
+*/ +func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, env, args...) +} + +/* +BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). +*/ +func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(gopath, packagePath, nil, args...) +} + +func replaceGoPath(environ []string, newGoPath string) []string { + newEnviron := []string{} + for _, v := range environ { + if !strings.HasPrefix(v, "GOPATH=") { + newEnviron = append(newEnviron, v) + } + } + return append(newEnviron, "GOPATH="+newGoPath) +} + +func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { + tmpDir, err := temporaryDirectory() + if err != nil { + return "", err + } + + if len(gopath) == 0 { + return "", errors.New("$GOPATH not provided when building " + packagePath) + } + + executable := filepath.Join(tmpDir, path.Base(packagePath)) + if runtime.GOOS == "windows" { + executable = executable + ".exe" + } + + cmdArgs := append([]string{"build"}, args...) + cmdArgs = append(cmdArgs, "-o", executable, packagePath) + + build := exec.Command("go", cmdArgs...) + build.Env = replaceGoPath(os.Environ(), gopath) + build.Env = append(build.Env, env...) + + output, err := build.CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) + } + + return executable, nil +} + +/* +You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by +gexec. In Ginkgo this is typically done in an AfterSuite callback. +*/ +func CleanupBuildArtifacts() { + mu.Lock() + defer mu.Unlock() + if tmpDir != "" { + os.RemoveAll(tmpDir) + tmpDir = "" + } +} + +func temporaryDirectory() (string, error) { + var err error + mu.Lock() + defer mu.Unlock() + if tmpDir == "" { + tmpDir, err = ioutil.TempDir("", "gexec_artifacts") + if err != nil { + return "", err + } + } + + return ioutil.TempDir(tmpDir, "g") +} diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go new file mode 100644 index 000000000..98a354937 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -0,0 +1,86 @@ +package gexec + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +/* +The Exit matcher operates on a session: + + Expect(session).Should(Exit()) + +Exit passes if the session has already exited. + +If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. +Otherwise, Exit will only succeed if the process has exited with the provided status code. + +Note that the process must have already exited. 
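+While the process is still running the session reports an exit code of -1,
+which Exit treats as "not exited yet" rather than as a mismatch.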
To wait for a process to exit, use Eventually: + + Eventually(session, 3).Should(Exit(0)) +*/ +func Exit(optionalExitCode ...int) *exitMatcher { + exitCode := -1 + if len(optionalExitCode) > 0 { + exitCode = optionalExitCode[0] + } + + return &exitMatcher{ + exitCode: exitCode, + } +} + +type exitMatcher struct { + exitCode int + didExit bool + actualExitCode int +} + +type Exiter interface { + ExitCode() int +} + +func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { + exiter, ok := actual.(Exiter) + if !ok { + return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) + } + + m.actualExitCode = exiter.ExitCode() + + if m.actualExitCode == -1 { + return false, nil + } + + if m.exitCode == -1 { + return true, nil + } + return m.exitCode == m.actualExitCode, nil +} + +func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "Expected process to exit. It did not." + } + return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) +} + +func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "you really shouldn't be able to see this!" + } else { + if m.exitCode == -1 { + return "Expected process not to exit. It did." + } + return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) + } +} + +func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + session, ok := actual.(*Session) + if ok { + return session.ExitCode() == -1 + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go new file mode 100644 index 000000000..05e695abc --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -0,0 +1,53 @@ +package gexec + +import ( + "io" + "sync" +) + +/* +PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. +This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each +session by passing in a PrefixedWriter: + +gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) +*/ +type PrefixedWriter struct { + prefix []byte + writer io.Writer + lock *sync.Mutex + atStartOfLine bool +} + +func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { + return &PrefixedWriter{ + prefix: []byte(prefix), + writer: writer, + lock: &sync.Mutex{}, + atStartOfLine: true, + } +} + +func (w *PrefixedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + toWrite := []byte{} + + for _, c := range b { + if w.atStartOfLine { + toWrite = append(toWrite, w.prefix...) + } + + toWrite = append(toWrite, c) + + w.atStartOfLine = c == '\n' + } + + _, err := w.writer.Write(toWrite) + if err != nil { + return 0, err + } + + return len(b), nil +} diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go new file mode 100644 index 000000000..5cb00ca65 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -0,0 +1,303 @@ +/* +Package gexec provides support for testing external processes. +*/ +package gexec + +import ( + "io" + "os" + "os/exec" + "sync" + "syscall" + + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" +) + +const INVALID_EXIT_CODE = 254 + +type Session struct { + //The wrapped command + Command *exec.Cmd + + //A *gbytes.Buffer connected to the command's stdout + Out *gbytes.Buffer + + //A *gbytes.Buffer connected to the command's stderr + Err *gbytes.Buffer + + //A channel that will close when the command exits + Exited <-chan struct{} + + lock *sync.Mutex + exitCode int +} + +/* +Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. + +The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. +These buffers can be used with the gbytes.Say matcher to match against unread output: + + Expect(session.Out).Should(gbytes.Say("foo-out")) + Expect(session.Err).Should(gbytes.Say("foo-err")) + +In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: + + Expect(session).Should(gbytes.Say("foo-out")) + +When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gybtes.Buffers and to the passed-in outWriter/errWriter. +This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: + + session, err := Start(command, GinkgoWriter, GinkgoWriter) + +This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. + +The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself. +Instead, to assert that the command has exited you can use the gexec.Exit matcher: + + Expect(session).Should(gexec.Exit()) + +When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any +Eventuallys waiting for the buffers to Say something. +*/ +func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { + exited := make(chan struct{}) + + session := &Session{ + Command: command, + Out: gbytes.NewBuffer(), + Err: gbytes.NewBuffer(), + Exited: exited, + lock: &sync.Mutex{}, + exitCode: -1, + } + + var commandOut, commandErr io.Writer + + commandOut, commandErr = session.Out, session.Err + + if outWriter != nil { + commandOut = io.MultiWriter(commandOut, outWriter) + } + + if errWriter != nil { + commandErr = io.MultiWriter(commandErr, errWriter) + } + + command.Stdout = commandOut + command.Stderr = commandErr + + err := command.Start() + if err == nil { + go session.monitorForExit(exited) + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + trackedSessions = append(trackedSessions, session) + } + + return session, err +} + +/* +Buffer implements the gbytes.BufferProvider interface and returns s.Out +This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: + + Eventually(session).Should(gbytes.Say("foo")) +*/ +func (s *Session) Buffer() *gbytes.Buffer { + return s.Out +} + +/* +ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. 
+ +To assert that the command has exited it is more convenient to use the Exit matcher: + + Eventually(s).Should(gexec.Exit()) + +When the process exits because it has received a particular signal, the exit code will be 128+signal-value +(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) + +*/ +func (s *Session) ExitCode() int { + s.lock.Lock() + defer s.lock.Unlock() + return s.exitCode +} + +/* +Wait waits until the wrapped command exits. It can be passed an optional timeout. +If the command does not exit within the timeout, Wait will trigger a test failure. + +Wait returns the session, making it possible to chain: + + session.Wait().Out.Contents() + +will wait for the command to exit then return the entirety of Out's contents. + +Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. +*/ +func (s *Session) Wait(timeout ...interface{}) *Session { + EventuallyWithOffset(1, s, timeout...).Should(Exit()) + return s +} + +/* +Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. + +If the command has already exited, Kill returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Kill() *Session { + return s.Signal(syscall.SIGKILL) +} + +/* +Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. + +If the command has already exited, Interrupt returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Interrupt() *Session { + return s.Signal(syscall.SIGINT) +} + +/* +Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. + +If the command has already exited, Terminate returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Terminate() *Session { + return s.Signal(syscall.SIGTERM) +} + +/* +Signal sends the running command the passed in signal. It does not wait for the process to exit. + +If the command has already exited, Signal returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Signal(signal os.Signal) *Session { + if s.processIsAlive() { + s.Command.Process.Signal(signal) + } + return s +} + +func (s *Session) monitorForExit(exited chan<- struct{}) { + err := s.Command.Wait() + s.lock.Lock() + s.Out.Close() + s.Err.Close() + status := s.Command.ProcessState.Sys().(syscall.WaitStatus) + if status.Signaled() { + s.exitCode = 128 + int(status.Signal()) + } else { + exitStatus := status.ExitStatus() + if exitStatus == -1 && err != nil { + s.exitCode = INVALID_EXIT_CODE + } + s.exitCode = exitStatus + } + s.lock.Unlock() + + close(exited) +} + +func (s *Session) processIsAlive() bool { + return s.ExitCode() == -1 && s.Command.Process != nil +} + +var trackedSessions = []*Session{} +var trackedSessionsMutex = &sync.Mutex{} + +/* +Kill sends a SIGKILL signal to all the processes started by Run, and waits for them to exit. +The timeout specified is applied to each process killed. + +If any of the processes already exited, KillAndWait returns silently. +*/ +func KillAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill().Wait(timeout...) + } + trackedSessions = []*Session{} +} + +/* +Kill sends a SIGTERM signal to all the processes started by Run, and waits for them to exit. +The timeout specified is applied to each process killed. 
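+Unlike KillAndWait, TerminateAndWait leaves the tracked-session list intact.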
+ +If any of the processes already exited, TerminateAndWait returns silently. +*/ +func TerminateAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate().Wait(timeout...) + } +} + +/* +Kill sends a SIGKILL signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Kill returns silently. +*/ +func Kill() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill() + } +} + +/* +Terminate sends a SIGTERM signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Terminate returns silently. +*/ +func Terminate() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate() + } +} + +/* +Signal sends the passed in signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Signal returns silently. +*/ +func Signal(signal os.Signal) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Signal(signal) + } +} + +/* +Interrupt sends the SIGINT signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Interrupt returns silently. +*/ +func Interrupt() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Interrupt() + } +} diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go new file mode 100644 index 000000000..894eae6d4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/handlers.go @@ -0,0 +1,322 @@ +package ghttp + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +//CombineHandler takes variadic list of handlers and produces one handler +//that calls each handler in order. 
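+//For example:
+//
+//	CombineHandlers(
+//		VerifyRequest("GET", "/foo"),
+//		RespondWith(http.StatusOK, "bar"),
+//	)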
+func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + for _, handler := range handlers { + handler(w, req) + } + } +} + +//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path +//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery` +// +//For path, you may pass in a string, in which case strict equality will be applied +//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example) +func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + Expect(req.Method).Should(Equal(method), "Method mismatch") + switch p := path.(type) { + case types.GomegaMatcher: + Expect(req.URL.Path).Should(p, "Path mismatch") + default: + Expect(req.URL.Path).Should(Equal(path), "Path mismatch") + } + if len(rawQuery) > 0 { + values, err := url.ParseQuery(rawQuery[0]) + Expect(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed") + + Expect(req.URL.Query()).Should(Equal(values), "RawQuery mismatch") + } + } +} + +//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the +//specified value +func VerifyContentType(contentType string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + Expect(req.Header.Get("Content-Type")).Should(Equal(contentType)) + } +} + +//VerifyMimeType returns a handler that verifies that a request has a specified mime type set +//in Content-Type header +func VerifyMimeType(mimeType string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + Expect(strings.Split(req.Header.Get("Content-Type"), ";")[0]).Should(Equal(mimeType)) + } +} + +//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header +//matching the passed in username and password +func VerifyBasicAuth(username string, password string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + auth := req.Header.Get("Authorization") + Expect(auth).ShouldNot(Equal(""), "Authorization header must be specified") + + decoded, err := base64.StdEncoding.DecodeString(auth[6:]) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch") + } +} + +//VerifyHeader returns a handler that verifies the request contains the passed in headers. +//The passed in header keys are first canonicalized via http.CanonicalHeaderKey. +// +//The request must contain *all* the passed in headers, but it is allowed to have additional headers +//beyond the passed in set. +func VerifyHeader(header http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + for key, values := range header { + key = http.CanonicalHeaderKey(key) + Expect(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key) + } + } +} + +//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values +//(recall that a `http.Header` is a mapping from string (key) to []string (values)) +//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object. 
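+//The key is canonicalized by VerifyHeader before the comparison is made.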
+func VerifyHeaderKV(key string, values ...string) http.HandlerFunc { + return VerifyHeader(http.Header{key: values}) +} + +//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array. +//It does this using Equal(). +func VerifyBody(expectedBody []byte) http.HandlerFunc { + return CombineHandlers( + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal(expectedBody), "Body Mismatch") + }, + ) +} + +//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation +//matching the passed in JSON string. It does this using Gomega's MatchJSON method +// +//VerifyJSON also verifies that the request's content type is application/json +func VerifyJSON(expectedJSON string) http.HandlerFunc { + return CombineHandlers( + VerifyMimeType("application/json"), + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(MatchJSON(expectedJSON), "JSON Mismatch") + }, + ) +} + +//VerifyJSONRepresenting is similar to VerifyJSON. Instead of taking a JSON string, however, it +//takes an arbitrary JSON-encodable object and verifies that the requests's body is a JSON representation +//that matches the object +func VerifyJSONRepresenting(object interface{}) http.HandlerFunc { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + return CombineHandlers( + VerifyContentType("application/json"), + VerifyJSON(string(data)), + ) +} + +//VerifyForm returns a handler that verifies a request contains the specified form values. +// +//The request must contain *all* of the specified values, but it is allowed to have additional +//form values beyond the passed in set. +func VerifyForm(values url.Values) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + Expect(err).ShouldNot(HaveOccurred()) + for key, vals := range values { + Expect(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key) + } + } +} + +//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values. +// +//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object. +func VerifyFormKV(key string, values ...string) http.HandlerFunc { + return VerifyForm(url.Values{key: values}) +} + +//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf +//representation of the passed message. 
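+//The body is unmarshalled into a fresh message of the expected concrete type
+//and compared against the expected message using Equal.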
+// +//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf +func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc { + return CombineHandlers( + VerifyContentType("application/x-protobuf"), + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + Expect(err).ShouldNot(HaveOccurred()) + req.Body.Close() + + expectedType := reflect.TypeOf(expected) + actualValuePtr := reflect.New(expectedType.Elem()) + + actual, ok := actualValuePtr.Interface().(proto.Message) + Expect(ok).Should(BeTrue(), "Message value is not a proto.Message") + + err = proto.Unmarshal(body, actual) + Expect(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf") + + Expect(actual).Should(Equal(expected), "ProtoBuf Mismatch") + }, + ) +} + +func copyHeader(src http.Header, dst http.Header) { + for key, value := range src { + dst[key] = value + } +} + +/* +RespondWith returns a handler that responds to a request with the specified status code and body + +Body may be a string or []byte + +Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers. +*/ +func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if len(optionalHeader) == 1 { + copyHeader(optionalHeader[0], w.Header()) + } + w.WriteHeader(statusCode) + switch x := body.(type) { + case string: + w.Write([]byte(x)) + case []byte: + w.Write(x) + default: + Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") + } + } +} + +/* +RespondWithPtr returns a handler that responds to a request with the specified status code and body + +Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body allowing different tests +to share the same setup but specify different status codes and bodies. + +Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. +Since the http.Header can be mutated after the fact you don't need to pass in a pointer. +*/ +func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if len(optionalHeader) == 1 { + copyHeader(optionalHeader[0], w.Header()) + } + w.WriteHeader(*statusCode) + if body != nil { + switch x := (body).(type) { + case *string: + w.Write([]byte(*x)) + case *[]byte: + w.Write(*x) + default: + Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") + } + } + } +} + +/* +RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body +containing the JSON-encoding of the passed in object + +Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers.
+*/ +func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + return RespondWith(statusCode, string(data), headers) +} + +/* +RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer +to a status code and object. + +This allows different tests to share the same setup but specify different status codes and JSON-encoded +objects. + +Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. +Since the http.Header can be mutated after the fact you don't need to pass in a pointer. +*/ +func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + copyHeader(headers, w.Header()) + w.WriteHeader(*statusCode) + w.Write(data) + } +} + +//RespondWithProto returns a handler that responds to a request with the specified status code and a body +//containing the protobuf serialization of the provided message. +// +//Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers. +func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := proto.Marshal(message) + Expect(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/x-protobuf"} + } + copyHeader(headers, w.Header()) + + w.WriteHeader(statusCode) + w.Write(data) + } +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server.go b/vendor/github.com/onsi/gomega/ghttp/test_server.go new file mode 100644 index 000000000..77535f309 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server.go @@ -0,0 +1,422 @@ +/* +Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports +registering multiple handlers. Incoming requests are not routed between the different handlers +- rather it is merely the order of the handlers that matters. The first request is handled by the first +registered handler, the second request by the second handler, etc. + +The intent here is to have each handler *verify* that the incoming request is valid. To accomplish this, ghttp +also provides a collection of bite-size handlers that each perform one aspect of request verification. These can +be composed together and registered with a ghttp server. The result is an expressive language for describing +the requests generated by the client under test.
+ +Here's a simple example; note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches. +A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients + + var _ = Describe("A Sprockets Client", func() { + var server *ghttp.Server + var client *SprocketClient + BeforeEach(func() { + server = ghttp.NewServer() + client = NewSprocketClient(server.URL(), "skywalker", "tk427") + }) + + AfterEach(func() { + server.Close() + }) + + Describe("fetching sprockets", func() { + var statusCode int + var sprockets []Sprocket + BeforeEach(func() { + statusCode = http.StatusOK + sprockets = []Sprocket{} + server.AppendHandlers(ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/sprockets"), + ghttp.VerifyBasicAuth("skywalker", "tk427"), + ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets), + )) + }) + + Context("when requesting all sprockets", func() { + Context("when the response is successful", func() { + BeforeEach(func() { + sprockets = []Sprocket{ + NewSprocket("Alfalfa"), + NewSprocket("Banana"), + } + }) + + It("should return the returned sprockets", func() { + Expect(client.Sprockets()).Should(Equal(sprockets)) + }) + }) + + Context("when the response is missing", func() { + BeforeEach(func() { + statusCode = http.StatusNotFound + }) + + It("should return an empty list of sprockets", func() { + Expect(client.Sprockets()).Should(BeEmpty()) + }) + }) + + Context("when the response fails to authenticate", func() { + BeforeEach(func() { + statusCode = http.StatusUnauthorized + }) + + It("should return an AuthenticationError error", func() { + sprockets, err := client.Sprockets() + Expect(sprockets).Should(BeEmpty()) + Expect(err).Should(MatchError(AuthenticationError)) + }) + }) + + Context("when the response is a server failure", func() { + BeforeEach(func() { + statusCode = http.StatusInternalServerError + }) + + It("should return an InternalError error", func() { + sprockets, err := client.Sprockets() + Expect(sprockets).Should(BeEmpty()) + Expect(err).Should(MatchError(InternalError)) + }) + }) + }) + + Context("when requesting some sprockets", func() { + BeforeEach(func() { + sprockets = []Sprocket{ + NewSprocket("Alfalfa"), + NewSprocket("Banana"), + } + + server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD")) + }) + + It("should make the request with a filter", func() { + Expect(client.Sprockets("food")).Should(Equal(sprockets)) + }) + }) + }) + }) +*/ +package ghttp + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "reflect" + "regexp" + "strings" + "sync" + + . "github.com/onsi/gomega" +) + +func new() *Server { + return &Server{ + AllowUnhandledRequests: false, + UnhandledRequestStatusCode: http.StatusInternalServerError, + rwMutex: &sync.RWMutex{}, + } +} + +type routedHandler struct { + method string + pathRegexp *regexp.Regexp + path string + handler http.HandlerFunc +} + +// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically. +func NewServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewServer(s) + return s +} + +// NewUnstartedServer returns a new, unstarted, `*ghttp.Server`. Useful for specifying a custom listener on `server.HTTPTestServer`. +func NewUnstartedServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewUnstartedServer(s) + return s +} + +// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server.
The server is started automatically. +func NewTLSServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewTLSServer(s) + return s +} + +type Server struct { + //The underlying httptest server + HTTPTestServer *httptest.Server + + //Defaults to false. If set to true, the Server will allow more requests than there are registered handlers. + //Direct use of this property is deprecated and is likely to be removed, use GetAllowUnhandledRequests and SetAllowUnhandledRequests instead. + AllowUnhandledRequests bool + + //The status code returned when receiving an unhandled request. + //Defaults to http.StatusInternalServerError. + //Only applies if AllowUnhandledRequests is true + //Direct use of this property is deprecated and is likely to be removed, use GetUnhandledRequestStatusCode and SetUnhandledRequestStatusCode instead. + UnhandledRequestStatusCode int + + //If provided, ghttp will log each request received to the provided io.Writer + //Defaults to nil + //If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures + Writer io.Writer + + receivedRequests []*http.Request + requestHandlers []http.HandlerFunc + routedHandlers []routedHandler + + rwMutex *sync.RWMutex + calls int +} + +//Start() starts an unstarted ghttp server. It is a catastrophic error to call Start more than once (thanks, httptest). +func (s *Server) Start() { + s.HTTPTestServer.Start() +} + +//URL() returns a URL that will hit the server +func (s *Server) URL() string { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.HTTPTestServer.URL +} + +//Addr() returns the address on which the server is listening. +func (s *Server) Addr() string { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.HTTPTestServer.Listener.Addr().String() +} + +//Close() should be called at the end of each test. It spins down and cleans up the test server. +func (s *Server) Close() { + s.rwMutex.Lock() + server := s.HTTPTestServer + s.HTTPTestServer = nil + s.rwMutex.Unlock() + + if server != nil { + server.Close() + } +} + +//ServeHTTP() makes Server an http.Handler +//When the server receives a request it handles the request in the following order: +// +//1. If the request matches a handler registered with RouteToHandler, that handler is called. +//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order. +//3. If all registered handlers have been called then: +// a) If AllowUnhandledRequests is set to true, the request will be handled with a response code of UnhandledRequestStatusCode +// b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.rwMutex.Lock() + defer func() { + e := recover() + if e != nil { + w.WriteHeader(http.StatusInternalServerError) + } + + //If the handler panics, GHTTP will silently succeed. This is bad™. + //To catch this case we need to fail the test if the handler has panicked. + //However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed) + //then we shouldn't double-report the error as this will confuse people. + + //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure + eAsString, ok := e.(string) + if ok && strings.Contains(eAsString, "defer GinkgoRecover()") { + return + } + + //If we're here, we have to do step 2: assert that the error is nil.
This assertion will + //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo). + //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer! + defer func() { + recover() + }() + Expect(e).Should(BeNil(), "Handler Panicked") + }() + + if s.Writer != nil { + s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL))) + } + + s.receivedRequests = append(s.receivedRequests, req) + if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok { + s.rwMutex.Unlock() + routedHandler(w, req) + } else if s.calls < len(s.requestHandlers) { + h := s.requestHandlers[s.calls] + s.calls++ + s.rwMutex.Unlock() + h(w, req) + } else { + s.rwMutex.Unlock() + if s.GetAllowUnhandledRequests() { + ioutil.ReadAll(req.Body) + req.Body.Close() + w.WriteHeader(s.GetUnhandledRequestStatusCode()) + } else { + formatted, err := httputil.DumpRequest(req, true) + Expect(err).NotTo(HaveOccurred(), "Encountered error while dumping HTTP request") + Expect(string(formatted)).Should(BeNil(), "Received Unhandled Request") + } + } +} + +//ReceivedRequests is an array containing all requests received by the server (both handled and unhandled requests) +func (s *Server) ReceivedRequests() []*http.Request { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + + return s.receivedRequests +} + +//RouteToHandler can be used to register handlers that will always handle requests that match +//the passed in method and path. +// +//The path may be either a string object or a *regexp.Regexp. +func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + rh := routedHandler{ + method: method, + handler: handler, + } + + switch p := path.(type) { + case *regexp.Regexp: + rh.pathRegexp = p + case string: + rh.path = p + default: + panic("path must be a string or a regular expression") + } + + for i, existingRH := range s.routedHandlers { + if existingRH.method == method && + reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) && + existingRH.path == rh.path { + s.routedHandlers[i] = rh + return + } + } + s.routedHandlers = append(s.routedHandlers, rh) +} + +func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) { + for _, rh := range s.routedHandlers { + if rh.method == method { + if rh.pathRegexp != nil { + if rh.pathRegexp.Match([]byte(path)) { + return rh.handler, true + } + } else if rh.path == path { + return rh.handler, true + } + } + } + + return nil, false +} + +//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc... +func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.requestHandlers = append(s.requestHandlers, handlers...) +} + +//SetHandler overrides the registered handler at the passed in index with the passed in handler +//This is useful, for example, when a server has been set up in a shared context, but must be tweaked +//for a particular test. +func (s *Server) SetHandler(index int, handler http.HandlerFunc) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.requestHandlers[index] = handler +} + +//GetHandler returns the handler registered at the passed in index.
+func (s *Server) GetHandler(index int) http.HandlerFunc { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + + return s.requestHandlers[index] +} + +func (s *Server) Reset() { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.HTTPTestServer.CloseClientConnections() + s.calls = 0 + s.receivedRequests = nil + s.requestHandlers = nil + s.routedHandlers = nil +} + +//WrapHandler combines the passed in handler with the handler registered at the passed in index. +//This is useful, for example, when a server has been set up in a shared context but must be tweaked +//for a particular test. +// +//If the currently registered handler is A, and the new passed in handler is B then +//WrapHandler will generate a new handler that first calls A, then calls B, and assigns it to the index +func (s *Server) WrapHandler(index int, handler http.HandlerFunc) { + existingHandler := s.GetHandler(index) + s.SetHandler(index, CombineHandlers(existingHandler, handler)) +} + +func (s *Server) CloseClientConnections() { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.HTTPTestServer.CloseClientConnections() +} + +//SetAllowUnhandledRequests sets whether the server will accept unhandled requests. +func (s *Server) SetAllowUnhandledRequests(allowUnhandledRequests bool) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.AllowUnhandledRequests = allowUnhandledRequests +} + +//GetAllowUnhandledRequests returns true if the server accepts unhandled requests. +func (s *Server) GetAllowUnhandledRequests() bool { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + + return s.AllowUnhandledRequests +} + +//SetUnhandledRequestStatusCode sets the status code to be returned when the server receives unhandled requests +func (s *Server) SetUnhandledRequestStatusCode(statusCode int) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + + s.UnhandledRequestStatusCode = statusCode +} + +//GetUnhandledRequestStatusCode returns the current status code being returned for unhandled requests +func (s *Server) GetUnhandledRequestStatusCode() int { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + + return s.UnhandledRequestStatusCode +} diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 926d54987..000000000 --- a/vendor/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2018 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
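Taken together, the verification handlers, the responders, and the server above compose into short, declarative client tests. The following is a minimal sketch, not part of the vendored source: the test name, the /widgets path, and the X-Api-Key header are hypothetical, while every ghttp and Gomega call used is defined in the files added by this patch.

    package widgets_test

    import (
        "net/http"
        "strings"
        "testing"

        . "github.com/onsi/gomega"
        "github.com/onsi/gomega/ghttp"
    )

    func TestCreateWidget(t *testing.T) {
        RegisterTestingT(t)

        server := ghttp.NewServer()
        defer server.Close()

        // Ordered expectation: the first request the server sees must be this POST,
        // and each bite-size handler verifies one aspect of it.
        server.AppendHandlers(ghttp.CombineHandlers(
            ghttp.VerifyRequest("POST", "/widgets", "dryRun=false"),
            ghttp.VerifyHeaderKV("X-Api-Key", "secret"),
            ghttp.VerifyJSON(`{"name": "sprocket"}`),
            ghttp.RespondWithJSONEncoded(http.StatusCreated, map[string]string{"id": "42"}),
        ))

        // Routed handler: requests matching this route bypass the ordered queue,
        // which suits background traffic such as health checks.
        server.RouteToHandler("GET", "/healthz", ghttp.RespondWith(http.StatusOK, "ok"))

        req, err := http.NewRequest("POST", server.URL()+"/widgets?dryRun=false",
            strings.NewReader(`{"name": "sprocket"}`))
        Expect(err).ShouldNot(HaveOccurred())
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("X-Api-Key", "secret")

        resp, err := http.DefaultClient.Do(req)
        Expect(err).ShouldNot(HaveOccurred())
        Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
        Expect(server.ReceivedRequests()).Should(HaveLen(1))
    }

CombineHandlers keeps each assertion small and reusable; RouteToHandler is the escape hatch for requests whose ordering the test should not pin down.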
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go deleted file mode 100644 index 656892c53..000000000 --- a/vendor/github.com/satori/go.uuid/codec.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "encoding/hex" - "fmt" -) - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// ABNF for supported UUID text representation follows: -// uuid := canonical | hashlike | braced | urn -// plain := canonical | hashlike -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct -// hashlike := 12hexoct -// braced := '{' plain '}' -// urn := URN ':' UUID-NID ':' plain -// URN := 'urn' -// UUID-NID := 'uuid' -// 12hexoct := 6hexoct 6hexoct -// 6hexoct := 4hexoct 2hexoct -// 4hexoct := 2hexoct 2hexoct -// 2hexoct := hexoct hexoct -// hexoct := hexdig hexdig -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' -func (u *UUID) UnmarshalText(text []byte) (err error) { - switch len(text) { - case 32: - return u.decodeHashLike(text) - case 36: - return u.decodeCanonical(text) - case 38: - return u.decodeBraced(text) - case 41: - fallthrough - case 45: - return u.decodeURN(text) - default: - return fmt.Errorf("uuid: incorrect UUID length: %s", text) - } -} - -// decodeCanonical decodes UUID string in format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". -func (u *UUID) decodeCanonical(t []byte) (err error) { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - src := t[:] - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash - } - _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return - } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return -} - -// decodeHashLike decodes UUID string in format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) (err error) { - src := t[:] - dst := u[:] - - if _, err = hex.Decode(dst, src); err != nil { - return err - } - return -} - -// decodeBraced decodes UUID string in format -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) (err error) { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID string in format -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) (err error) { - total := len(t) - - urn_uuid_prefix := t[:9] - - if !bytes.Equal(urn_uuid_prefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format: %s", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID string in canonical format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodePlain(t []byte) (err error) { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrrect UUID length: %s", t) - } -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. 
-func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != Size { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go deleted file mode 100644 index 3f2f1da2d..000000000 --- a/vendor/github.com/satori/go.uuid/generator.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/binary" - "hash" - "net" - "os" - "sync" - "time" -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -var ( - global = newDefaultGenerator() - - epochFunc = unixTimeFunc - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - return global.NewV1() -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - return global.NewV2(domain) -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - return global.NewV3(ns, name) -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - return global.NewV4() -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - return global.NewV5(ns, name) -} - -// Generator provides interface for generating UUIDs. -type Generator interface { - NewV1() UUID - NewV2(domain byte) UUID - NewV3(ns UUID, name string) UUID - NewV4() UUID - NewV5(ns UUID, name string) UUID -} - -// Default generator implementation. -type generator struct { - storageOnce sync.Once - storageMutex sync.Mutex - - lastTime uint64 - clockSequence uint16 - hardwareAddr [6]byte -} - -func newDefaultGenerator() Generator { - return &generator{} -} - -// NewV1 returns UUID based on current timestamp and MAC address. 
-func (g *generator) NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(V1) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func (g *generator) NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(V2) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func (g *generator) NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(V3) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV4 returns random generated UUID. -func (g *generator) NewV4() UUID { - u := UUID{} - g.safeRandom(u[:]) - u.SetVersion(V4) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func (g *generator) NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(V5) - u.SetVariant(VariantRFC4122) - - return u -} - -func (g *generator) initStorage() { - g.initClockSequence() - g.initHardwareAddr() -} - -func (g *generator) initClockSequence() { - buf := make([]byte, 2) - g.safeRandom(buf) - g.clockSequence = binary.BigEndian.Uint16(buf) -} - -func (g *generator) initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(g.hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - g.safeRandom(g.hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - g.hardwareAddr[0] |= 0x01 -} - -func (g *generator) safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func (g *generator) getStorage() (uint64, uint16, []byte) { - g.storageOnce.Do(g.initStorage) - - g.storageMutex.Lock() - defer g.storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= g.lastTime { - g.clockSequence++ - } - g.lastTime = timeNow - - return timeNow, g.clockSequence, g.hardwareAddr[:] -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// Returns UUID based on hashing of namespace UUID and name. 
-func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go deleted file mode 100644 index 56759d390..000000000 --- a/vendor/github.com/satori/go.uuid/sql.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == Size { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. 
-func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index a2b8e2ca2..000000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "encoding/hex" -) - -// Size of a UUID in bytes. -const Size = 16 - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [Size]byte - -// UUID versions -const ( - _ byte = iota - V1 - V2 - V3 - V4 - V5 -) - -// UUID layout variants. -const ( - VariantNCS byte = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -// Nil is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) -) - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() byte { - return u[6] >> 4 -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() byte { - switch { - case (u[8] >> 7) == 0x00: - return VariantNCS - case (u[8] >> 6) == 0x02: - return VariantRFC4122 - case (u[8] >> 5) == 0x06: - return VariantMicrosoft - case (u[8] >> 5) == 0x07: - fallthrough - default: - return VariantFuture - } -} - -// Bytes returns bytes slice representation of UUID. 
-func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits. -func (u *UUID) SetVariant(v byte) { - switch v { - case VariantNCS: - u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) - case VariantRFC4122: - u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) - case VariantMicrosoft: - u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) - case VariantFuture: - fallthrough - default: - u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) - } -} - -// Must is a helper that wraps a call to a function returning (UUID, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); -func Must(u UUID, err error) UUID { - if err != nil { - panic(err) - } - return u -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 0f5b989d0..1f607943f 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -135,6 +135,7 @@ var brokenAuthHeaderProviders = []string{ "https://account.health.nokia.com", "https://accounts.zoho.com", "https://gitter.im/login/oauth/token", + "https://openid-connect.onelogin.com/oidc", } // brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go new file mode 100644 index 000000000..36c0b5ac4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall.go @@ -0,0 +1,367 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_darwin.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named errno. + +A line beginning with //sysnb is like //sys, except that the +goroutine will not be suspended during the execution of the system +call. This must only be used for system calls which can never +block, as otherwise the system call could cause all goroutines to +hang. 
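To make the //sys convention concrete: the regenerated Linux files later in this patch gain a DeleteModule wrapper. Its prototype is inferred here from the generated signature (so treat the exact source line as an assumption), and the body shown is the shape the generator emits for it, matching the generated code that appears in zsyscall_linux_sparc64.go below.

    //sys	DeleteModule(name string, flags int) (err error)

    func DeleteModule(name string, flags int) (err error) {
        var _p0 *byte
        _p0, err = BytePtrFromString(name) // string becomes a NUL-terminated *byte
        if err != nil {
            return
        }
        _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
        if e1 != 0 {
            err = errnoErr(e1) // nonzero errno becomes a Go error
        }
        return
    }

The two real arguments are padded with a zero to fit the three-argument Syscall form, and the camel-case function name is rewritten into the SYS_DELETE_MODULE constant.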
+*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + plan9 = flag.Bool("plan9", false, "plan9") + openbsd = flag.Bool("openbsd", false, "openbsd") + netbsd = flag.Bool("netbsd", false, "netbsd") + dragonfly = flag.Bool("dragonfly", false, "dragonfly") + arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair + tags = flag.String("tags", "", "build tags") + filename = flag.String("output", "", "output file name (standard output if omitted)") +) + +// cmdLine returns this program's command-line arguments +func cmdLine() string { + return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is a function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses a parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + + // Check that we are using the new build system if we should + if goos == "linux" && goarch != "sparc64" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + fmt.Fprintf(os.Stderr, "In the new build system, mksyscall should not be called directly.\n") + fmt.Fprintf(os.Stderr, "See README.md\n") + os.Exit(1) + } + } + + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + text := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, errno error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, sysname := f[2], f[3], f[4], f[5] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + // Try in vain to keep people from editing this file.
+ // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Go function header. + outDecl := "" + if len(out) > 0 { + outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + break + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass dummy pointer in that case. + // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). + text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) + text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && (*openbsd || *netbsd) { + args = append(args, "0") + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && *dragonfly { + if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && endianness != "" { + if len(args)%2 == 1 && *arm { + // arm abi specifies 64-bit argument uses + // (even, odd) pair + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + + // Determine which form 
to use; pad args with zeros. + asm := "Syscall" + if nonblock != nil { + if errvar == "" && goos == "linux" { + asm = "RawSyscallNoError" + } else { + asm = "RawSyscall" + } + } else { + if errvar == "" && goos == "linux" { + asm = "SyscallNoError" + } + } + if len(args) <= 3 { + for len(args) < 3 { + args = append(args, "0") + } + } else if len(args) <= 6 { + asm += "6" + for len(args) < 6 { + args = append(args, "0") + } + } else if len(args) <= 9 { + asm += "9" + for len(args) < 9 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) + } + + // System call number. + if sysname == "" { + sysname = "SYS_" + funct + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToUpper(sysname) + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) + + // Assign return values. + body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" && !*plan9 { + reg = "e1" + ret[2] = reg + doErrno = true + } else if p.Name == "err" && *plan9 { + ret[0] = "r0" + ret[2] = "e1" + break + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. + if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" || *plan9 { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + if errvar == "" && goos == "linux" { + // raw syscall without error on Linux, see golang.org/issue/22924 + text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + } + text += body + + if *plan9 && ret[2] == "e1" { + text += "\tif int32(r0) == -1 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } else if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = errnoErr(e1)\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n\n" + + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +%s +` diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 9ce06df6e..8b7f27309 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// go run mksyscall.go -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index de9927049..285646462 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 81c4f0935..37e3c6925 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// go run mksyscall.go -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 338c32d40..67f3b4e5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 96a671344..da9986dd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go +// go run mksyscall.go -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build dragonfly,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 9bbbf9662..80903e47b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go +// go run mksyscall.go -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build freebsd,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index ee7090ff4..cd250ff0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go +// go run mksyscall.go -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build freebsd,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 9aeff5131..290a9c2cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go +// go run mksyscall.go -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build freebsd,arm diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index cd94680d1..5356a5175 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go +// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 96e9df7da..5e3abfc10 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go +// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index e8d82d14c..75db4c0c1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go +// go run mksyscall.go -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,arm diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 41f2d0cf0..b890cb03c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go +// go run mksyscall.go -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,arm64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 7e65fe0b7..cc17b43d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,4 +1,4 @@ -// mksyscall.pl -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,mips diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index fd06fb890..caf1408ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,mips64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 9e8ec28c9..266be8b4a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,mips64le diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index b4d2ccbb0..b16b3e102 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,mipsle diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index bca3d2536..27b6a6bf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,ppc64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index e34089c66..f7ecc9afd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,ppc64le diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 312b2afaf..e3cd4e53f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go +// go run mksyscall.go -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,riscv64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 04ec8befe..3001d3798 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,s390x syscall_linux.go syscall_linux_s390x.go +// go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,s390x diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index b26aee957..aafe3660f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go +// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,sparc64 @@ -417,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -448,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -508,21 +533,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -589,6 +599,60 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -599,6 +663,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -737,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -919,6 +1040,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var 
_p0 *byte _p0, err = BytePtrFromString(path) @@ -1078,6 +1215,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1489,6 +1646,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 9ed7c71fb..642db7670 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go +// go run mksyscall.go -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build netbsd,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 613b7fd99..59585fee3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go +// go run mksyscall.go -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build netbsd,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 084750878..6ec31434b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go +// go run mksyscall.go -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
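As a usage note for the Linux hunks above: this revision of golang.org/x/sys/unix adds wrappers such as MemfdCreate and Renameat2 (shown in the generated code). Below is a minimal, illustrative sketch of calling two of them; it is not part of the diff, and the file names plus the MFD_CLOEXEC/RENAME_EXCHANGE constants (from the same package) are assumptions.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// memfd_create(2): an anonymous, memory-backed file (Linux 3.17+).
	// "scratch" is a hypothetical name for illustration.
	fd, err := unix.MemfdCreate("scratch", unix.MFD_CLOEXEC)
	if err != nil {
		fmt.Println("memfd_create:", err)
		return
	}
	defer unix.Close(fd)

	// renameat2(2) with RENAME_EXCHANGE atomically swaps two existing
	// paths (Linux 3.15+); "a.txt" and "b.txt" are placeholders.
	if err := unix.Renameat2(unix.AT_FDCWD, "a.txt", unix.AT_FDCWD, "b.txt", unix.RENAME_EXCHANGE); err != nil {
		fmt.Println("renameat2:", err)
	}
}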
// +build netbsd,arm diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 414cd13c8..6a489fac0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build openbsd,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 846f5fa64..30cba4347 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build openbsd,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 59911659d..fa1beda33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build openbsd,arm diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify/fsnotify.v1/AUTHORS new file mode 100644 index 000000000..5ab5d41c5 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/LICENSE b/vendor/gopkg.in/fsnotify/fsnotify.v1/LICENSE new file mode 100644 index 000000000..f21e54080 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/fen.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/fen.go new file mode 100644 index 000000000..ced39cb88 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/fsnotify.go new file mode 100644 index 000000000..190bf0de5 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations.
+type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify.go new file mode 100644 index 000000000..d9fd1b88a --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
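To show how the Event/Op API defined in fsnotify.go above is consumed, here is a hedged, illustrative sketch (not part of the vendored code; the watched path is a placeholder):

package main

import (
	"log"

	"gopkg.in/fsnotify/fsnotify.v1"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Placeholder path; Add is non-recursive.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			// Op is a bitmask; more than one bit may be set on a single event.
			if ev.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}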
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error; we now need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify watch will already have been removed. + // watches and paths are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below, e.g. when the readEvents() goroutine receives IN_IGNORED, + // so EINVAL means that the wd is being rm_watch()ed or its file was removed + // by another thread and we have not yet received the IN_IGNORED event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // The only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // EOF was received; this should really never happen. + err = io.EOF + } else if n < 0 { + // An error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel, such as events marked ignored by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify_poller.go new file mode 100644 index 000000000..cc7db4b22 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/kqueue.go new file mode 100644 index 000000000..86e76a3d6 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of whether we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths under this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating the error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when + we do a rm -fr on a recursively watched folder and we receive a + modification event first, but the folder has already been deleted and we later + receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark it as a delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing; ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // The only way the previous loop breaks is if w.done was closed, so we need an async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns a platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryChangeEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify, which provides a +// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching the subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_bsd.go new file mode 100644 index 000000000..7d8de1451 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
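The kqueue()/register()/read() helpers above are thin wrappers over unix.Kevent. Below is a standalone sketch of the same raw pattern (BSD/Darwin only; illustrative and not part of the vendored code; the watched path is a placeholder):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(kq)

	// Placeholder path; kqueue watches an open file descriptor, not a name.
	fd, err := unix.Open("/tmp/watched", unix.O_RDONLY|unix.O_NONBLOCK, 0700)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	var change unix.Kevent_t
	// EV_ADD|EV_CLEAR|EV_ENABLE registers the vnode filter, as register() does above.
	unix.SetKevent(&change, fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	change.Fflags = unix.NOTE_WRITE | unix.NOTE_DELETE

	events := make([]unix.Kevent_t, 1)
	// A nil timeout blocks until an event arrives, like read(kq, ..., nil).
	n, err := unix.Kevent(kq, []unix.Kevent_t{change}, events, nil)
	if err != nil {
		log.Fatal(err)
	}
	if n > 0 {
		log.Printf("got %d event(s), fflags=0x%x", n, events[0].Fflags)
	}
}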
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_darwin.go new file mode 100644 index 000000000..9139e1716 --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/gopkg.in/fsnotify/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify/fsnotify.v1/windows.go new file mode 100644 index 000000000..09436f31d --- /dev/null +++ b/vendor/gopkg.in/fsnotify/fsnotify.v1/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
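+				// Pretend the kernel filled the entire buffer so the
+				// parsing loop below consumes everything we received.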
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
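+			// NextEntryOffset walked past the number of bytes actually
+			// read, so the remaining entries cannot be trusted.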
+			if offset >= n {
+				w.Errors <- errors.New("Windows reported an event offset beyond the buffer; events have likely been missed")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sysFSACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sysFSMODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sysFSATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sysFSCREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sysFSDELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sysFSMODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sysFSMOVEDFROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sysFSMOVEDTO
+	}
+	return 0
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
index a14435e82..0ee738e11 100644
--- a/vendor/gopkg.in/yaml.v2/encode.go
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -13,6 +13,19 @@ import (
 	"unicode/utf8"
 )
 
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and String(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+	Float64() (float64, error)
+	Int64() (int64, error)
+	String() string
+}
+
 type encoder struct {
 	emitter yaml_emitter_t
 	event   yaml_event_t
@@ -89,6 +102,21 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
 	}
 	iface := in.Interface()
 	switch m := iface.(type) {
+	case jsonNumber:
+		integer, err := m.Int64()
+		if err == nil {
+			// In this case the json.Number is a valid int64
+			in = reflect.ValueOf(integer)
+			break
+		}
+		float, err := m.Float64()
+		if err == nil {
+			// In this case the json.Number is a valid float64
+			in = reflect.ValueOf(float)
+			break
+		}
+		// fallback case - no number could be obtained
+		in = reflect.ValueOf(m.String())
 	case time.Time, *time.Time:
 		// Although time.Time implements TextMarshaler,
 		// we don't want to treat it as a string for YAML