From 1b29d24c7f95d890f15593274b91bbbcff517111 Mon Sep 17 00:00:00 2001 From: cossdk Date: Fri, 6 Sep 2019 19:48:10 +0000 Subject: [PATCH] Release 1.2.0 --- .yardopts | 7 + CHANGELOG.md | 7 + Gemfile | 3 + Makefile | 9 +- README.md | 11 +- aws/awserr/error.go | 43 +- aws/awserr/types.go | 31 +- aws/awsutil/path_value.go | 11 +- aws/client/default_retryer.go | 31 +- aws/client/default_retryer_test.go | 2 +- aws/client/logger.go | 12 +- aws/config.go | 4 +- aws/convert_types_test.go | 2 +- aws/corehandlers/handlers.go | 60 +- aws/corehandlers/handlers_1_10_test.go | 3 + aws/corehandlers/handlers_test.go | 6 +- aws/credentials/chain_provider_test.go | 3 + aws/credentials/credentials.go | 8 +- aws/credentials/endpointcreds/provider.go | 17 +- .../endpointcreds/provider_test.go | 4 +- aws/credentials/ibmiam/common.go | 3 +- aws/credentials/processcreds/provider_test.go | 99 +- aws/ec2metadata/api.go | 4 +- aws/ec2metadata/service.go | 4 +- aws/ec2metadata/service_test.go | 6 +- aws/endpoints/defaults.go | 652 +++- aws/endpoints/endpoints.go | 7 +- aws/endpoints/endpoints_test.go | 8 +- aws/endpoints/example_test.go | 10 - aws/endpoints/v3model.go | 5 +- aws/request/connection_reset_error.go | 17 +- aws/request/connection_reset_error_other.go | 11 - .../connection_reset_error_other_test.go | 12 - aws/request/connection_reset_error_test.go | 127 +- aws/request/handlers.go | 45 + aws/request/http_request_retry_test.go | 4 +- aws/request/offset_reader.go | 15 +- aws/request/offset_reader_test.go | 30 +- aws/request/request.go | 139 +- aws/request/request_1_8.go | 5 +- aws/request/request_pagination.go | 2 +- aws/request/request_retry_test.go | 115 +- aws/request/request_test.go | 226 +- aws/request/retryer.go | 145 +- aws/session/credentials.go | 124 + aws/session/custom_ca_bundle_test.go | 28 +- aws/session/doc.go | 210 +- aws/session/env_config.go | 33 +- aws/session/env_config_test.go | 67 +- aws/session/session.go | 160 +- aws/session/session_test.go | 496 +-- 
aws/session/shared_config.go | 200 +- aws/session/shared_config_test.go | 153 +- aws/session/shared_test.go | 15 + aws/session/testdata/credential_source_config | 25 +- .../credential_source_config_for_windows | 10 + aws/session/testdata/shared_config | 16 + aws/session/testdata/test_json.json | 5 + aws/signer/v4/v4.go | 58 +- aws/signer/v4/v4_test.go | 6 + aws/types.go | 20 +- aws/version.go | 2 +- awstesting/util.go | 9 +- .../loggingUploadObjectReadBehavior/README.md | 14 + .../loggingUploadObjectReadBehavior/main.go | 118 + internal/sdktesting/env.go | 53 + .../shared_config_other_test.go | 10 +- .../shared_config_windows_test.go | 10 +- models/apis/s3/2006-03-01/api-2.json | 591 +++- models/apis/s3/2006-03-01/docs-2.json | 161 +- models/endpoints/endpoints.json | 570 +++- private/model/api/api.go | 2 +- .../models/restjson/0000-00-00/api-2.json | 1 - .../models/restxml/0000-00-00/api-2.json | 1 - .../service/restjsonservice/service.go | 2 - .../service/restxmlservice/service.go | 2 - private/model/api/customization_passes.go | 82 +- private/model/api/docstring.go | 623 ++-- private/model/api/docstring_test.go | 159 +- private/model/api/example.go | 4 +- private/model/api/examples_builder.go | 2 +- private/model/api/operation.go | 19 +- private/model/api/passes_test.go | 14 +- private/model/api/s3manger_input.go | 4 +- private/model/api/service_name.go | 142 +- private/model/api/shape.go | 5 +- private/protocol/ec2query/build.go | 3 +- private/protocol/ec2query/unmarshal.go | 32 +- .../protocol/ec2query/unmarshal_error_test.go | 82 + private/protocol/json/jsonutil/unmarshal.go | 22 + private/protocol/jsonrpc/jsonrpc.go | 20 +- .../protocol/jsonrpc/unmarshal_err_test.go | 79 + private/protocol/query/build.go | 2 +- private/protocol/query/unmarshal.go | 2 +- private/protocol/query/unmarshal_error.go | 77 +- .../protocol/query/unmarshal_error_test.go | 94 + private/protocol/rest/build.go | 20 +- private/protocol/rest/unmarshal.go | 14 +- 
private/protocol/restjson/restjson.go | 17 +- .../protocol/restjson/unmarshal_error_test.go | 79 + private/protocol/restxml/restxml.go | 6 +- private/protocol/timestamp.go | 13 + private/protocol/unmarshal_test.go | 10 +- private/protocol/xml/xmlutil/unmarshal.go | 21 +- service/kms/api.go | 83 +- service/kms/errors.go | 10 +- service/s3/api.go | 2865 +++++++++++++++-- service/s3/bucket_location.go | 3 +- service/s3/bucket_location_test.go | 2 +- service/s3/customizations.go | 15 +- service/s3/doc_custom.go | 14 + service/s3/errors.go | 6 + service/s3/s3crypto/decryption_client.go | 2 +- service/s3/s3crypto/kms_key_handler.go | 2 +- service/s3/s3iface/interface.go | 40 + service/s3/s3manager/batch.go | 6 +- service/s3/s3manager/download.go | 8 +- .../s3/s3manager/s3manageriface/interface.go | 24 +- service/s3/s3manager/upload.go | 157 +- service/s3/s3manager/upload_input.go | 21 +- service/s3/s3manager/upload_test.go | 308 ++ service/s3/sse.go | 64 +- service/s3/sse_test.go | 33 +- service/s3/statusok_error.go | 4 +- service/s3/unmarshal_error.go | 34 +- 125 files changed, 7917 insertions(+), 2568 deletions(-) create mode 100644 .yardopts create mode 100644 Gemfile delete mode 100644 aws/request/connection_reset_error_other.go delete mode 100644 aws/request/connection_reset_error_other_test.go create mode 100644 aws/session/credentials.go create mode 100644 aws/session/shared_test.go create mode 100644 aws/session/testdata/credential_source_config_for_windows create mode 100644 aws/session/testdata/test_json.json create mode 100644 example/service/s3/loggingUploadObjectReadBehavior/README.md create mode 100644 example/service/s3/loggingUploadObjectReadBehavior/main.go create mode 100644 internal/sdktesting/env.go create mode 100644 private/protocol/ec2query/unmarshal_error_test.go create mode 100644 private/protocol/jsonrpc/unmarshal_err_test.go create mode 100644 private/protocol/query/unmarshal_error_test.go create mode 100644 
private/protocol/restjson/unmarshal_error_test.go diff --git a/.yardopts b/.yardopts new file mode 100644 index 00000000..95832795 --- /dev/null +++ b/.yardopts @@ -0,0 +1,7 @@ +--plugin go +-e doc-src/plugin/plugin.rb +-m markdown +-o doc/api +--title "IBM COS SDK for Go" +aws/**/*.go +service/**/*.go diff --git a/CHANGELOG.md b/CHANGELOG.md index c8b199cf..65d49525 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # CHANGELOG +# 1.2.0 +## Content +* Immutable Object Storage +* Archive Tier Support +### Defect Fixes +* AWS Patches aligned to version 1.23.4 of the AWS SDK for Go + # 1.1.0 ## Content * Key Protect Support diff --git a/Gemfile b/Gemfile new file mode 100644 index 00000000..b9d98f98 --- /dev/null +++ b/Gemfile @@ -0,0 +1,3 @@ +source 'https://rubygems.org' +gem 'yard', git: 'git://github.com/lsegal/yard' +gem 'yard-go', git: 'git://github.com/lsegal/yard-go' diff --git a/Makefile b/Makefile index 4466e88a..b352d11a 100644 --- a/Makefile +++ b/Makefile @@ -51,6 +51,11 @@ cleanup-models: ################### # Unit/CI Testing # ################### + +unit-no-verify: + @echo "go test SDK and vendor packages with no linting" + go test -count=1 -tags ${UNIT_TEST_TAGS} ${SDK_ALL_PKGS} + unit: verify @echo "go test SDK and vendor packages" go test -count=1 -tags ${UNIT_TEST_TAGS} ${SDK_ALL_PKGS} @@ -67,8 +72,10 @@ ci-test: generate unit-with-race-cover ci-test-generate-validate ci-test-generate-validate: @echo "CI test validate no generated code changes" + git update-index --assume-unchanged go.mod go.sum git add . 
-A gitstatus=`git diff --cached --ignore-space-change`; \ + git update-index --no-assume-unchanged go.mod go.sum echo "$$gitstatus"; \ if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi @@ -227,4 +234,4 @@ docs: $(AWS_DOC_GEN_TOOL) `pwd` api_info: - @go run private/model/cli/api-info/api-info.go + @go run private/model/cli/api-info/api-info.go \ No newline at end of file diff --git a/README.md b/README.md index 69eb23d9..11e71cea 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,15 @@ You'll need: These values can be found in the IBM Cloud Console by [generating a 'service credential'](https://cloud.ibm.com/docs/services/cloud-object-storage/iam?topic=cloud-object-storage-service-credentials#service-credentials). +## Archive Tier Support +You can automatically archive objects after a specified length of time or after a specified date. Once archived, a temporary copy of an object can be restored for access as needed. Restore time may take up to 15 hours. + +An archive policy is set at the bucket level by calling the ``PutBucketLifecycleConfiguration`` method on a client instance. A newly added or modified archive policy applies to new objects uploaded and does not affect existing objects. For more detail, see the [documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-go). + +## Immutable Object Storage +Users can configure buckets with an Immutable Object Storage policy to prevent objects from being modified or deleted for a defined period of time. The retention period can be specified on a per-object basis, or objects can inherit a default retention period set on the bucket. It is also possible to set open-ended and permanent retention periods. Immutable Object Storage meets the rules set forth by the SEC governing record retention, and IBM Cloud administrators are unable to bypass these restrictions. 
For more detail, see the [IBM Cloud documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-go). + +Note: Immutable Object Storage does not support Aspera transfers via the SDK to upload objects or directories at this stage. ## Getting the SDK @@ -59,7 +68,7 @@ import ( const ( apiKey = "" serviceInstanceID = "" - authEndpoint = "https://iam.bluemix.net/oidc/token" + authEndpoint = "https://iam.cloud.ibm.com/identity/token" serviceEndpoint = "https://s3-api.us-geo.objectstorage.softlayer.net" ) diff --git a/aws/awserr/error.go b/aws/awserr/error.go index c1c3ce3f..99849c0e 100644 --- a/aws/awserr/error.go +++ b/aws/awserr/error.go @@ -42,6 +42,26 @@ type Error interface { OrigErr() error } +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + // BatchedErrors is a batch of errors which also wraps lower level errors with // code, message, and original errors. Calling Error() will include all errors // that occurred in the batch. @@ -118,8 +138,27 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. 
Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/aws/awserr/types.go b/aws/awserr/types.go index 0202a008..9cf7eaf4 100644 --- a/aws/awserr/types.go +++ b/aws/awserr/types.go @@ -1,6 +1,9 @@ package awserr -import "fmt" +import ( + "encoding/hex" + "fmt" +) // SprintError returns a string of the formatted error code. // @@ -119,6 +122,7 @@ type requestError struct { awsError statusCode int requestID string + bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. 
+func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error @@ -181,7 +208,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) + msg += e[i].Error() // We check the next index to see if it is within the slice. // If it is, then we append a newline. We do this, because unit tests // could be broken with the additional '\n' diff --git a/aws/awsutil/path_value.go b/aws/awsutil/path_value.go index 11c52c38..285e54d6 100644 --- a/aws/awsutil/path_value.go +++ b/aws/awsutil/path_value.go @@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue } + setValue(rval, v) } } diff --git a/aws/client/default_retryer.go b/aws/client/default_retryer.go index 5a61bb38..a143a111 100644 --- a/aws/client/default_retryer.go +++ b/aws/client/default_retryer.go @@ -14,12 +14,12 @@ import ( // struct and override the specific methods. 
For example, to override only // the MaxRetries method: // -// type retryer struct { -// client.DefaultRetryer -// } +// type retryer struct { +// client.DefaultRetryer +// } // -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -34,8 +34,8 @@ func (d DefaultRetryer) MaxRetries() int { func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { + isThrottle := r.IsErrorThrottle() + if isThrottle { if delay, ok := getRetryDelay(r); ok { return delay } @@ -44,7 +44,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { } retryCount := r.RetryCount - if throttle && retryCount > 8 { + if isThrottle && retryCount > 8 { retryCount = 8 } else if retryCount > 13 { retryCount = 13 @@ -65,21 +65,8 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. 
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long diff --git a/aws/client/default_retryer_test.go b/aws/client/default_retryer_test.go index 02913bf7..0dbbd1b3 100644 --- a/aws/client/default_retryer_test.go +++ b/aws/client/default_retryer_test.go @@ -60,7 +60,7 @@ func TestRetryThrottleStatusCodes(t *testing.T) { d := DefaultRetryer{NumMaxRetries: 10} for i, c := range cases { - throttle := d.shouldThrottle(&c.r) + throttle := c.r.IsErrorThrottle() retry := d.ShouldRetry(&c.r) if e, a := c.expectThrottle, throttle; e != a { diff --git a/aws/client/logger.go b/aws/client/logger.go index b33b76c8..989cfc99 100644 --- a/aws/client/logger.go +++ b/aws/client/logger.go @@ -67,10 +67,14 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, diff --git a/aws/config.go b/aws/config.go index 23918380..4b665d46 100644 --- a/aws/config.go +++ b/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. 
// -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -251,7 +251,7 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), diff --git a/aws/convert_types_test.go b/aws/convert_types_test.go index 1a9461e1..9bc5f742 100644 --- a/aws/convert_types_test.go +++ b/aws/convert_types_test.go @@ -562,7 +562,7 @@ func TestTimeValueSlice(t *testing.T) { } for i := range out2 { if in[i] == nil { - if !(*(out2[i])).IsZero() { + if !(out2[i]).IsZero() { t.Errorf("Unexpected value at idx %d", idx) } } else { diff --git a/aws/corehandlers/handlers.go b/aws/corehandlers/handlers.go index d194eb3e..681ffd3e 100644 --- a/aws/corehandlers/handlers.go +++ b/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. + // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. 
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. 
+ if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or diff --git a/aws/corehandlers/handlers_1_10_test.go b/aws/corehandlers/handlers_1_10_test.go index 70f9b16d..8d4ecb6f 100644 --- a/aws/corehandlers/handlers_1_10_test.go +++ b/aws/corehandlers/handlers_1_10_test.go @@ -43,6 +43,9 @@ func TestSendHandler_HEADNoBody(t *testing.T) { S3ForcePathStyle: aws.Bool(true), }, }) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } svc := s3.New(sess) diff --git a/aws/corehandlers/handlers_test.go b/aws/corehandlers/handlers_test.go index c295620f..514b5bc4 100644 --- a/aws/corehandlers/handlers_test.go +++ b/aws/corehandlers/handlers_test.go @@ -119,7 +119,7 @@ func TestAfterRetryWithContextCanceled(t *testing.T) { req := c.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) - ctx := &awstesting.FakeContext{DoneCh: make(chan struct{}, 0)} + ctx := &awstesting.FakeContext{DoneCh: make(chan struct{})} req.SetContext(ctx) req.Error = fmt.Errorf("some error") @@ -149,7 +149,7 @@ func TestAfterRetryWithContext(t *testing.T) { req := c.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) - ctx := &awstesting.FakeContext{DoneCh: make(chan struct{}, 0)} + ctx := &awstesting.FakeContext{DoneCh: make(chan struct{})} req.SetContext(ctx) req.Error = fmt.Errorf("some error") @@ -177,7 +177,7 @@ func TestSendWithContextCanceled(t *testing.T) { req := c.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) - ctx := &awstesting.FakeContext{DoneCh: make(chan struct{}, 0)} + ctx := &awstesting.FakeContext{DoneCh: make(chan struct{})} req.SetContext(ctx) req.Error = fmt.Errorf("some error") diff --git a/aws/credentials/chain_provider_test.go b/aws/credentials/chain_provider_test.go index 5ed568a0..e26b9fd5 100644 --- 
a/aws/credentials/chain_provider_test.go +++ b/aws/credentials/chain_provider_test.go @@ -120,6 +120,9 @@ func TestChainProviderIsExpired(t *testing.T) { } _, err = p.Retrieve() + if err != nil { + t.Errorf("Expect no error, got %v", err) + } if p.IsExpired() { t.Errorf("Expect not expired after retrieve") } diff --git a/aws/credentials/credentials.go b/aws/credentials/credentials.go index 944866d5..b2dfd578 100644 --- a/aws/credentials/credentials.go +++ b/aws/credentials/credentials.go @@ -79,7 +79,7 @@ type Value struct { // AWS Secret Access Key SecretAccessKey string - // AWS Session token + // AWS Session Token SessionToken string // Provider used to get credentials @@ -95,6 +95,12 @@ type Value struct { ServiceInstanceID string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. diff --git a/aws/credentials/endpointcreds/provider.go b/aws/credentials/endpointcreds/provider.go index 809125ed..02d20cb5 100644 --- a/aws/credentials/endpointcreds/provider.go +++ b/aws/credentials/endpointcreds/provider.go @@ -17,7 +17,7 @@ // { // "AccessKeyId" : "MUA...", // "SecretAccessKey" : "/7PC5om....", -// "token" : "AQoDY....=", +// "Token" : "AQoDY....=", // "Expiration" : "2016-02-25T06:03:31Z" // } // @@ -39,6 +39,7 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" "github.com/IBM/ibm-cos-sdk-go/aws/credentials" "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. 
@@ -174,7 +175,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode endpoint credentials", err, ) @@ -185,11 +186,15 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, ) + return } // Response body format is not consistent between metadata endpoints. diff --git a/aws/credentials/endpointcreds/provider_test.go b/aws/credentials/endpointcreds/provider_test.go index d48bb018..791363f4 100644 --- a/aws/credentials/endpointcreds/provider_test.go +++ b/aws/credentials/endpointcreds/provider_test.go @@ -29,7 +29,7 @@ func TestRetrieveRefreshableCredentials(t *testing.T) { err := encoder.Encode(map[string]interface{}{ "AccessKeyID": "AKID", "SecretAccessKey": "SECRET", - "token": "TOKEN", + "Token": "TOKEN", "Expiration": time.Now().Add(1 * time.Hour), }) @@ -173,7 +173,7 @@ func TestAuthorizationToken(t *testing.T) { err := encoder.Encode(map[string]interface{}{ "AccessKeyID": "AKID", "SecretAccessKey": "SECRET", - "token": "TOKEN", + "Token": "TOKEN", "Expiration": time.Now().Add(1 * time.Hour), }) diff --git a/aws/credentials/ibmiam/common.go b/aws/credentials/ibmiam/common.go index 4d17a734..6d2fa6c8 100644 --- a/aws/credentials/ibmiam/common.go +++ b/aws/credentials/ibmiam/common.go @@ -12,7 +12,7 @@ import ( const ( // Constants // Default IBM IAM Authentication Server Endpoint - 
defaultAuthEndPoint = `https://iam.ng.bluemix.net/oidc/token` + defaultAuthEndPoint = `https://iam.cloud.ibm.com/identity/token` // Logger constants // Debug Log constant @@ -67,7 +67,6 @@ func NewProvider(providerName string, config *aws.Config, apiKey, authEndPoint, } provider.logLevel = logLevel - if apiKey == "" { provider.ErrorStatus = awserr.New("IbmApiKeyIdNotFound", "IBM API Key Id not found", nil) if provider.logLevel.Matches(aws.LogDebug) { diff --git a/aws/credentials/processcreds/provider_test.go b/aws/credentials/processcreds/provider_test.go index 6f0f413f..c44b6bda 100644 --- a/aws/credentials/processcreds/provider_test.go +++ b/aws/credentials/processcreds/provider_test.go @@ -15,12 +15,12 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds" "github.com/IBM/ibm-cos-sdk-go/aws/session" - "github.com/IBM/ibm-cos-sdk-go/awstesting" + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" ) func TestProcessProviderFromSessionCfg(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") if runtime.GOOS == "windows" { @@ -57,8 +57,8 @@ func TestProcessProviderFromSessionCfg(t *testing.T) { } func TestProcessProviderFromSessionWithProfileCfg(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") os.Setenv("AWS_PROFILE", "non_expire") @@ -88,8 +88,8 @@ func TestProcessProviderFromSessionWithProfileCfg(t *testing.T) { } func TestProcessProviderNotFromCredProcCfg(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") os.Setenv("AWS_PROFILE", "not_alone") @@ -123,8 +123,8 @@ func 
TestProcessProviderNotFromCredProcCfg(t *testing.T) { } func TestProcessProviderFromSessionCrd(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() if runtime.GOOS == "windows" { os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata\\shcred_win.ini") @@ -160,8 +160,8 @@ func TestProcessProviderFromSessionCrd(t *testing.T) { } func TestProcessProviderFromSessionWithProfileCrd(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_PROFILE", "non_expire") if runtime.GOOS == "windows" { @@ -190,8 +190,8 @@ func TestProcessProviderFromSessionWithProfileCrd(t *testing.T) { } func TestProcessProviderNotFromCredProcCrd(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_PROFILE", "not_alone") if runtime.GOOS == "windows" { @@ -224,8 +224,8 @@ func TestProcessProviderNotFromCredProcCrd(t *testing.T) { } func TestProcessProviderBadCommand(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() creds := processcreds.NewCredentials("/bad/process") _, err := creds.Get() @@ -235,8 +235,8 @@ func TestProcessProviderBadCommand(t *testing.T) { } func TestProcessProviderMoreEmptyCommands(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() creds := processcreds.NewCredentials("") _, err := creds.Get() @@ -247,8 +247,8 @@ func TestProcessProviderMoreEmptyCommands(t *testing.T) { } func TestProcessProviderExpectErrors(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() 
creds := processcreds.NewCredentials( fmt.Sprintf( @@ -300,8 +300,8 @@ func TestProcessProviderExpectErrors(t *testing.T) { } func TestProcessProviderTimeout(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() command := "/bin/sleep 2" if runtime.GOOS == "windows" { @@ -319,8 +319,8 @@ func TestProcessProviderTimeout(t *testing.T) { } func TestProcessProviderWithLongSessionToken(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() creds := processcreds.NewCredentials( fmt.Sprintf( @@ -349,8 +349,8 @@ type credentialTest struct { } func TestProcessProviderStatic(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() // static creds := processcreds.NewCredentials( @@ -371,8 +371,8 @@ func TestProcessProviderStatic(t *testing.T) { } func TestProcessProviderNotExpired(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() // non-static, not expired exp := &credentialTest{} @@ -408,8 +408,8 @@ func TestProcessProviderNotExpired(t *testing.T) { } func TestProcessProviderExpired(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() // non-static, expired exp := &credentialTest{} @@ -445,8 +445,8 @@ func TestProcessProviderExpired(t *testing.T) { } func TestProcessProviderForceExpire(t *testing.T) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() // non-static, not expired @@ -499,8 +499,8 @@ func TestProcessProviderForceExpire(t *testing.T) { } func TestProcessProviderAltConstruct(t *testing.T) { - 
oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() // constructing with exec.Cmd instead of string myCommand := exec.Command( @@ -523,8 +523,8 @@ func TestProcessProviderAltConstruct(t *testing.T) { } func BenchmarkProcessProvider(b *testing.B) { - oldEnv := preserveImportantStashEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() creds := processcreds.NewCredentials( fmt.Sprintf( @@ -547,35 +547,6 @@ func BenchmarkProcessProvider(b *testing.B) { } } -func preserveImportantStashEnv() []string { - envsToKeep := []string{"PATH"} - - if runtime.GOOS == "windows" { - envsToKeep = append(envsToKeep, "ComSpec") - envsToKeep = append(envsToKeep, "SYSTEM32") - } - - extraEnv := getEnvs(envsToKeep) - - oldEnv := awstesting.StashEnv() //clear env - - for key, val := range extraEnv { - os.Setenv(key, val) - } - - return oldEnv -} - -func getEnvs(envs []string) map[string]string { - extraEnvs := make(map[string]string) - for _, env := range envs { - if val, ok := os.LookupEnv(env); ok && len(val) > 0 { - extraEnvs[env] = val - } - } - return extraEnvs -} - func getOSCat() string { if runtime.GOOS == "windows" { return "type" diff --git a/aws/ec2metadata/api.go b/aws/ec2metadata/api.go index c3cf2258..5bfac6cf 100644 --- a/aws/ec2metadata/api.go +++ b/aws/ec2metadata/api.go @@ -82,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -101,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, 
- awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } diff --git a/aws/ec2metadata/service.go b/aws/ec2metadata/service.go index 095d71ef..fe4bf837 100644 --- a/aws/ec2metadata/service.go +++ b/aws/ec2metadata/service.go @@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) return } @@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) return } diff --git a/aws/ec2metadata/service_test.go b/aws/ec2metadata/service_test.go index 41ab82f7..460c7990 100644 --- a/aws/ec2metadata/service_test.go +++ b/aws/ec2metadata/service_test.go @@ -13,8 +13,8 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/ec2metadata" "github.com/IBM/ibm-cos-sdk-go/aws/request" - "github.com/IBM/ibm-cos-sdk-go/awstesting" "github.com/IBM/ibm-cos-sdk-go/awstesting/unit" + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" ) func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) { @@ -80,8 +80,8 @@ func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) { } func TestClientDisableIMDS(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("AWS_EC2_METADATA_DISABLED", "true") diff --git a/aws/endpoints/defaults.go 
b/aws/endpoints/defaults.go index 17ee7c72..f8a9e7d6 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -27,6 +27,7 @@ const ( EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -82,7 +83,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") return reg }(), }, @@ -128,6 +129,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "EU (Paris)", }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -166,6 +170,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -178,6 +183,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -270,6 +276,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "sa-east-1": endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -308,6 +320,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, 
"us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -327,6 +340,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -334,8 +348,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -381,6 +398,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -409,6 +427,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -416,6 +435,24 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -460,6 +497,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -484,6 +522,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, 
"eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -515,9 +554,27 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -529,6 +586,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -584,6 +642,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -619,6 +678,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -674,6 +734,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -710,6 +771,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -720,6 +782,7 @@ var awsPartition = partition{ 
"codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -727,9 +790,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -771,6 +836,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -802,6 +868,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -939,10 +1006,13 @@ var awsPartition = partition{ "comprehendmedical": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -960,6 +1030,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1005,10 +1076,34 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dax": service{ @@ -1047,6 +1142,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1075,6 +1171,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1097,6 +1194,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1109,6 +1212,12 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1139,6 +1248,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, @@ 
-1160,11 +1270,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1172,11 +1288,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "ec2": service{ @@ -1196,6 +1337,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1229,6 +1371,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": 
endpoint{}, "us-east-2": endpoint{}, @@ -1257,16 +1400,18 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1278,6 +1423,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1290,12 +1436,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1319,6 +1467,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1346,6 +1495,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -1410,11 +1560,12 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "events": service{ @@ -1432,6 +1583,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1442,6 +1594,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1449,6 +1602,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1481,10 +1635,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1524,6 +1683,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1534,6 +1694,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1541,9 +1702,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1557,19 
+1720,32 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1630,7 +1806,9 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1649,11 +1827,16 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1668,13 +1851,92 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ap-northeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kafka": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1695,6 +1957,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1707,11 +1970,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": 
endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1731,12 +1997,6 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1749,6 +2009,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1756,6 +2017,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -1771,6 +2042,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1781,16 +2053,23 @@ var awsPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1827,6 +2106,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1895,6 +2175,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1950,6 +2231,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1998,6 +2280,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2010,11 +2293,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2070,6 +2356,12 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2185,6 +2477,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "ram": service{ Endpoints: endpoints{ @@ -2195,6 +2499,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2219,6 +2524,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -2243,6 +2549,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2256,10 +2563,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2278,6 +2589,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2289,8 +2601,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2349,6 +2664,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2356,12 +2672,39 @@ var 
awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "s3": service{ @@ -2397,8 +2740,9 @@ var awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2642,6 +2986,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2649,6 +2994,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2792,6 +3138,7 @@ var awsPartition = partition{ "sms": 
service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2803,6 +3150,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2814,6 +3162,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2846,6 +3195,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2895,7 +3245,8 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -2919,6 +3270,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2941,6 +3293,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2951,6 +3304,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2962,6 +3316,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2983,11 +3338,17 @@ var awsPartition = 
partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -2995,11 +3356,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "sts": service{ @@ -3034,8 +3420,14 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{ + Hostname: "sts.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", 
CredentialScope: credentialScope{ @@ -3066,9 +3458,15 @@ var awsPartition = partition{ }, }, "support": service{ + PartitionEndpoint: "aws-global", Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "swf": service{ @@ -3086,6 +3484,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3108,6 +3507,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3125,9 +3525,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3139,7 +3541,11 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -3183,12 +3589,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ 
-3235,6 +3645,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3246,6 +3657,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3511,6 +3923,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -3531,7 +3952,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -3541,6 +3963,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -3548,6 +3977,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -3698,6 +4134,18 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, "swf": service{ Endpoints: endpoints{ @@ -3805,6 +4253,7 @@ var awsusgovPartition = partition{ "athena": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": 
endpoint{}, }, }, @@ -3854,9 +4303,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "codecommit": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3894,6 +4351,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3919,6 +4388,12 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -4020,6 +4495,7 @@ var awsusgovPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4034,6 +4510,16 @@ var awsusgovPartition = partition{ }, "glue": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -4047,6 +4533,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4131,6 +4623,7 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": 
endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4141,6 +4634,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4160,6 +4664,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -4180,6 +4690,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -4243,6 +4766,31 @@ var awsusgovPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4304,6 +4852,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": 
endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", diff --git a/aws/endpoints/endpoints.go b/aws/endpoints/endpoints.go index 4982af36..cfedcd65 100644 --- a/aws/endpoints/endpoints.go +++ b/aws/endpoints/endpoints.go @@ -170,10 +170,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + // ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } diff --git a/aws/endpoints/endpoints_test.go b/aws/endpoints/endpoints_test.go index de0e5302..7c569258 100644 --- a/aws/endpoints/endpoints_test.go +++ b/aws/endpoints/endpoints_test.go @@ -169,6 +169,9 @@ func TestResolveEndpointForPartition(t *testing.T) { enum := testPartitions.Partitions()[0] expected, err := testPartitions.EndpointFor("service1", "us-east-1") + if err != nil { + t.Fatalf("unexpected error, %v", err) + } actual, err := enum.EndpointFor("service1", "us-east-1") if err != nil { @@ -327,8 +330,11 @@ func TestPartitionForRegion(t *testing.T) { if !ok { t.Fatalf("expect partition to be found") } + if e, a := expect.DNSSuffix(), actual.DNSSuffix(); e != a { + t.Errorf("expect %s partition DNSSuffix, got %s", e, a) + } if e, a := expect.ID(), actual.ID(); e != a { - t.Errorf("expect %s partition, got %s", e, a) + t.Errorf("expect %s partition ID, got %s", e, a) } } diff --git a/aws/endpoints/example_test.go b/aws/endpoints/example_test.go index f833c14d..e32d847a 100644 --- a/aws/endpoints/example_test.go +++ b/aws/endpoints/example_test.go @@ -52,14 +52,4 @@ func ExampleResolverFunc() { Bucket: aws.String("myBucket"), Key: aws.String("myObjectKey"), }) - - //// Create the SQS service client with the 
shared session. This will - //// fallback to the default endpoint resolver because the customization - //// passes any non S3 service endpoint resolve to the default resolver. - //sqsSvc := sqs.New(sess) - //// Operation calls will be made to the default endpoint for SQS for the - //// region configured. - //sqsSvc.ReceiveMessage(&sqs.ReceiveMessageInput{ - // QueueUrl: aws.String("my-queue-url"), - //}) } diff --git a/aws/endpoints/v3model.go b/aws/endpoints/v3model.go index ff6f76db..523ad79a 100644 --- a/aws/endpoints/v3model.go +++ b/aws/endpoints/v3model.go @@ -54,8 +54,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } diff --git a/aws/request/connection_reset_error.go b/aws/request/connection_reset_error.go index 271da432..d9b37f4d 100644 --- a/aws/request/connection_reset_error.go +++ b/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + "strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true } return false diff --git a/aws/request/connection_reset_error_other.go b/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca4..00000000 --- a/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/aws/request/connection_reset_error_other_test.go 
b/aws/request/connection_reset_error_other_test.go deleted file mode 100644 index 51723540..00000000 --- a/aws/request/connection_reset_error_other_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build appengine plan9 - -package request_test - -import ( - "errors" -) - -var ( - errAcceptConnectionResetStub = errors.New("accept: connection reset") - errReadConnectionResetStub = errors.New("read: connection reset") -) diff --git a/aws/request/connection_reset_error_test.go b/aws/request/connection_reset_error_test.go index 6f93b854..71814a50 100644 --- a/aws/request/connection_reset_error_test.go +++ b/aws/request/connection_reset_error_test.go @@ -3,11 +3,126 @@ package request_test import ( - "net" - "syscall" -) + "net/http" + "reflect" + "testing" + "time" -var ( - errAcceptConnectionResetStub = &net.OpError{Op: "accept", Err: syscall.ECONNRESET} - errReadConnectionResetStub = &net.OpError{Op: "read", Err: syscall.ECONNRESET} + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/client" + "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" + "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + v4 "github.com/IBM/ibm-cos-sdk-go/aws/signer/v4" + "github.com/IBM/ibm-cos-sdk-go/awstesting/unit" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonrpc" ) + +type connResetCloser struct { + Err error +} + +func (rc *connResetCloser) Read(b []byte) (int, error) { + return 0, rc.Err +} + +func (rc *connResetCloser) Close() error { + return nil +} + +func TestSerializationErrConnectionReset_accept(t *testing.T) { + cases := map[string]struct { + Err error + ExpectAttempts int + }{ + "accept with temporary": { + Err: errAcceptConnectionResetStub, + ExpectAttempts: 6, + }, + "read not temporary": { + Err: errReadConnectionResetStub, + ExpectAttempts: 1, + }, + "write with temporary": { + Err: errWriteConnectionResetStub, + ExpectAttempts: 6, + }, + "write broken pipe 
with temporary": { + Err: errWriteBrokenPipeStub, + ExpectAttempts: 6, + }, + "generic connection reset": { + Err: errConnectionResetStub, + ExpectAttempts: 6, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + count := 0 + handlers := request.Handlers{} + handlers.Send.PushBack(func(r *request.Request) { + count++ + r.HTTPResponse = &http.Response{} + r.HTTPResponse.Body = &connResetCloser{ + Err: c.Err, + } + }) + + handlers.Sign.PushBackNamed(v4.SignRequestHandler) + handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + + op := &request.Operation{ + Name: "op", + HTTPMethod: "POST", + HTTPPath: "/", + } + + meta := metadata.ClientInfo{ + ServiceName: "fooService", + SigningName: "foo", + SigningRegion: "foo", + Endpoint: "localhost", + APIVersion: "2001-01-01", + JSONVersion: "1.1", + TargetPrefix: "Foo", + } + cfg := unit.Session.Config.Copy() + cfg.MaxRetries = aws.Int(5) + cfg.SleepDelay = func(time.Duration) {} + + req := request.New( + *cfg, + meta, + handlers, + client.DefaultRetryer{NumMaxRetries: 5}, + op, + &struct{}{}, + &struct{}{}, + ) + + osErr := c.Err + req.ApplyOptions(request.WithResponseReadTimeout(time.Second)) + err := req.Send() + if err == nil { + t.Error("Expected error 'SerializationError', but received nil") + } + if aerr, ok := err.(awserr.Error); ok && aerr.Code() != request.ErrCodeSerialization { + t.Errorf("Expected 'SerializationError', but received %q", aerr.Code()) + } else if !ok { + t.Errorf("Expected 'awserr.Error', but received %v", reflect.TypeOf(err)) + } else if aerr.OrigErr().Error() != osErr.Error() { + t.Errorf("Expected %q, but received %q", osErr.Error(), aerr.OrigErr().Error()) + } + + if e, a := c.ExpectAttempts, count; e != a 
{ + t.Errorf("Expected %v, but received %v", e, a) + } + }) + } +} diff --git a/aws/request/handlers.go b/aws/request/handlers.go index 8ef8548a..627ec722 100644 --- a/aws/request/handlers.go +++ b/aws/request/handlers.go @@ -59,6 +59,51 @@ func (h *Handlers) Clear() { h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. 
type HandlerListRunItem struct { diff --git a/aws/request/http_request_retry_test.go b/aws/request/http_request_retry_test.go index 5a89ae7e..e114c21d 100644 --- a/aws/request/http_request_retry_test.go +++ b/aws/request/http_request_retry_test.go @@ -16,7 +16,9 @@ func TestRequestCancelRetry(t *testing.T) { c := make(chan struct{}) reqNum := 0 - s := mock.NewMockClient(aws.NewConfig().WithMaxRetries(10)) + s := mock.NewMockClient(&aws.Config{ + MaxRetries: aws.Int(1), + }) s.Handlers.Validate.Clear() s.Handlers.Unmarshal.Clear() s.Handlers.UnmarshalMeta.Clear() diff --git a/aws/request/offset_reader.go b/aws/request/offset_reader.go index 9c45bc1d..d6c80a42 100644 --- a/aws/request/offset_reader.go +++ b/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. 
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/aws/request/offset_reader_test.go b/aws/request/offset_reader_test.go index d81635b7..f680786e 100644 --- a/aws/request/offset_reader_test.go +++ b/aws/request/offset_reader_test.go @@ -23,7 +23,7 @@ func TestOffsetReaderRead(t *testing.T) { t.Errorf("expect %v, got %v", e, a) } if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } if e, a := buf, tempBuf; !bytes.Equal(e, a) { t.Errorf("expect %v, got %v", e, a) @@ -32,11 +32,14 @@ func TestOffsetReaderRead(t *testing.T) { func TestOffsetReaderSeek(t *testing.T) { buf := []byte("testData") - reader := newOffsetReader(bytes.NewReader(buf), 0) + reader, err := newOffsetReader(bytes.NewReader(buf), 0) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } orig, err := reader.Seek(0, sdkio.SeekCurrent) if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } if e, a := int64(0), orig; e != a { t.Errorf("expect %v, got %v", e, a) @@ -44,7 +47,7 @@ func TestOffsetReaderSeek(t *testing.T) { n, err := reader.Seek(0, sdkio.SeekEnd) if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } if e, a := int64(len(buf)), n; e != a { t.Errorf("expect %v, got %v", e, a) @@ -52,7 +55,7 @@ func TestOffsetReaderSeek(t *testing.T) { n, err = reader.Seek(orig, sdkio.SeekStart) if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } if e, a := int64(0), n; e != a { t.Errorf("expect %v, got %v", e, a) @@ -65,7 +68,7 @@ func TestOffsetReaderClose(t *testing.T) { err := reader.Close() if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } tempBuf := make([]byte, len(buf)) @@ 
-83,7 +86,10 @@ func TestOffsetReaderCloseAndCopy(t *testing.T) { tempBuf := make([]byte, len(buf)) reader := &offsetReader{buf: bytes.NewReader(buf)} - newReader := reader.CloseAndCopy(0) + newReader, err := reader.CloseAndCopy(0) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } n, err := reader.Read(tempBuf) if e, a := n, 0; e != a { @@ -98,7 +104,7 @@ func TestOffsetReaderCloseAndCopy(t *testing.T) { t.Errorf("expect %v, got %v", e, a) } if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } if e, a := buf, tempBuf; !bytes.Equal(e, a) { t.Errorf("expect %v, got %v", e, a) @@ -110,13 +116,17 @@ func TestOffsetReaderCloseAndCopyOffset(t *testing.T) { tempBuf := make([]byte, len(buf)) reader := &offsetReader{buf: bytes.NewReader(buf)} - newReader := reader.CloseAndCopy(4) + newReader, err := reader.CloseAndCopy(4) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + n, err := newReader.Read(tempBuf) if e, a := n, len(buf)-4; e != a { t.Errorf("expect %v, got %v", e, a) } if err != nil { - t.Errorf("expect nil, %v", err) + t.Fatalf("expect no error, got %v", err) } expected := []byte{'D', 'a', 't', 'a', 0, 0, 0, 0} diff --git a/aws/request/request.go b/aws/request/request.go index 67d1137f..a86c8e35 100644 --- a/aws/request/request.go +++ b/aws/request/request.go @@ -64,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. 
@@ -231,6 +240,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -259,7 +272,18 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } @@ -330,16 +354,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -358,12 +381,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if 
r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -379,7 +402,7 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } @@ -387,12 +410,16 @@ func (r *Request) Sign() error { return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
Go 1.8 removed the automatic detection @@ -409,10 +436,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -473,29 +500,28 @@ func (r *Request) Send() error { r.AttemptTime = time.Now() if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", false, err) + debugLogReqError(r, "Sign Request", notRetrying, err) return err } if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryCancel(r.Error) { - return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } - r.prepareRetry() - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } } } -func (r *Request) prepareRetry() { +func (r *Request) prepareRetry() error { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) @@ -506,12 +532,19 @@ func (r *Request) prepareRetry() { // the request's body even though the Client's Do returned. r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } // Closing response body to ensure that no response body is leaked // between retry attempts. 
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { r.HTTPResponse.Body.Close() } + + return nil } func (r *Request) sendRequest() (sendErr error) { @@ -520,7 +553,9 @@ func (r *Request) sendRequest() (sendErr error) { r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -528,13 +563,17 @@ func (r *Request) sendRequest() (sendErr error) { r.Handlers.ValidateResponse.Run(r) if r.Error != nil { r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } r.Handlers.Unmarshal.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -561,48 +600,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryCancel(err error) bool { - switch err := err.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryCancel(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryCancel(err.Err) - case temporary: - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retriable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/aws/request/request_1_8.go b/aws/request/request_1_8.go index 7c6a8000..1e0fc174 100644 --- a/aws/request/request_1_8.go +++ b/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/aws/request/request_pagination.go b/aws/request/request_pagination.go index 71db2ed0..6da5bd8d 100644 --- a/aws/request/request_pagination.go +++ b/aws/request/request_pagination.go @@ -146,7 +146,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/aws/request/request_retry_test.go b/aws/request/request_retry_test.go index 89217fca..2dcffa7d 100644 --- 
a/aws/request/request_retry_test.go +++ b/aws/request/request_retry_test.go @@ -6,8 +6,11 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" "testing" "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" ) func newRequest(t *testing.T, url string) *http.Request { @@ -18,22 +21,22 @@ func newRequest(t *testing.T, url string) *http.Request { return r } -func TestShouldRetryCancel_nil(t *testing.T) { - if shouldRetryCancel(nil) != true { - t.Error("shouldRetryCancel(nil) should return true") +func TestShouldRetryError_nil(t *testing.T) { + if shouldRetryError(nil) != true { + t.Error("shouldRetryError(nil) should return true") } } -func TestShouldRetryCancel_timeout(t *testing.T) { +func TestShouldRetryError_timeout(t *testing.T) { tr := &http.Transport{} defer tr.CloseIdleConnections() - cli := http.Client{ + client := http.Client{ Timeout: time.Nanosecond, Transport: tr, } - resp, err := cli.Do(newRequest(t, "https://179.179.179.179/no/such/host")) + resp, err := client.Do(newRequest(t, "https://179.179.179.179/no/such/host")) if resp != nil { resp.Body.Close() } @@ -42,15 +45,15 @@ func TestShouldRetryCancel_timeout(t *testing.T) { } debugerr(t, err) - if shouldRetryCancel(err) == false { + if shouldRetryError(err) == false { t.Errorf("this request timed out and should be retried") } } -func TestShouldRetryCancel_cancelled(t *testing.T) { +func TestShouldRetryError_cancelled(t *testing.T) { tr := &http.Transport{} defer tr.CloseIdleConnections() - cli := http.Client{ + client := http.Client{ Transport: tr, } @@ -79,7 +82,7 @@ func TestShouldRetryCancel_cancelled(t *testing.T) { close(ch) // request is cancelled before anything }() - resp, err := cli.Do(r) + resp, err := client.Do(r) if resp != nil { resp.Body.Close() } @@ -89,11 +92,37 @@ func TestShouldRetryCancel_cancelled(t *testing.T) { debugerr(t, err) - if shouldRetryCancel(err) == true { + if shouldRetryError(err) == true { t.Errorf("this request was cancelled and should not be retried") } } +func 
TestShouldRetry(t *testing.T) { + + syscallError := os.SyscallError{ + Err: ErrInvalidParams{}, + Syscall: "open", + } + + opError := net.OpError{ + Op: "dial", + Net: "tcp", + Source: net.Addr(nil), + Err: &syscallError, + } + + urlError := url.Error{ + Op: "Post", + URL: "https://localhost:52398", + Err: &opError, + } + origError := awserr.New("ErrorTestShouldRetry", "Test should retry when error received", &urlError).OrigErr() + if e, a := true, shouldRetryError(origError); e != a { + t.Errorf("Expected to return %v to retry when error occured, got %v instead", e, a) + } + +} + func debugerr(t *testing.T, err error) { t.Logf("Error, %v", err) @@ -102,10 +131,7 @@ func debugerr(t *testing.T, err error) { t.Logf("%s is a temporary error: %t", err, err.Temporary()) return case *url.Error: - // we should be before 1.5 - // that's our case ! - t.Logf("err: %s", err) - t.Logf("err: %#v", err.Err) + t.Logf("err: %s, nested err: %#v", err, err.Err) if operr, ok := err.Err.(*net.OpError); ok { t.Logf("operr: %#v", operr) } @@ -115,3 +141,62 @@ func debugerr(t *testing.T, err error) { return } } + +func TestRequest_retryCustomCodes(t *testing.T) { + cases := map[string]struct { + Code string + RetryErrorCodes []string + ThrottleErrorCodes []string + Retryable bool + Throttle bool + }{ + "retry code": { + Code: "RetryMePlease", + RetryErrorCodes: []string{ + "RetryMePlease", + "SomeOtherError", + }, + Retryable: true, + }, + "throttle code": { + Code: "AThrottleableError", + RetryErrorCodes: []string{ + "RetryMePlease", + "SomeOtherError", + }, + ThrottleErrorCodes: []string{ + "AThrottleableError", + "SomeOtherError", + }, + Throttle: true, + }, + "unknown code": { + Code: "UnknownCode", + RetryErrorCodes: []string{ + "RetryMePlease", + "SomeOtherError", + }, + Retryable: false, + }, + } + + for name, c := range cases { + req := Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{}, + Error: awserr.New(c.Code, "some error", nil), + RetryErrorCodes: 
c.RetryErrorCodes, + ThrottleErrorCodes: c.ThrottleErrorCodes, + } + + retryable := req.IsErrorRetryable() + if e, a := c.Retryable, retryable; e != a { + t.Errorf("%s, expect %v retryable, got %v", name, e, a) + } + + throttle := req.IsErrorThrottle() + if e, a := c.Throttle, throttle; e != a { + t.Errorf("%s, expect %v throttle, got %v", name, e, a) + } + } +} diff --git a/aws/request/request_test.go b/aws/request/request_test.go index f03a5f34..b1d7af1a 100644 --- a/aws/request/request_test.go +++ b/aws/request/request_test.go @@ -21,19 +21,54 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/client" "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" - "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers" "github.com/IBM/ibm-cos-sdk-go/aws/credentials" "github.com/IBM/ibm-cos-sdk-go/aws/defaults" "github.com/IBM/ibm-cos-sdk-go/aws/request" - "github.com/IBM/ibm-cos-sdk-go/aws/signer/v4" "github.com/IBM/ibm-cos-sdk-go/awstesting" "github.com/IBM/ibm-cos-sdk-go/awstesting/unit" - "github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonrpc" "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest" ) var errTimeout = awserr.New("foo", "bar", errors.New("net/http: request canceled Timeout")) +type tempNetworkError struct { + op string + msg string + isTemp bool +} + +func (e *tempNetworkError) Temporary() bool { return e.isTemp } +func (e *tempNetworkError) Error() string { + return fmt.Sprintf("%s: %s", e.op, e.msg) +} + +var ( + // net.OpError accept, are always temporary + errAcceptConnectionResetStub = &tempNetworkError{ + isTemp: true, op: "accept", msg: "connection reset", + } + + // net.OpError read for ECONNRESET is not temporary. + errReadConnectionResetStub = &tempNetworkError{ + isTemp: false, op: "read", msg: "connection reset", + } + + // net.OpError write for ECONNRESET may not be temporary, but is treaded as + // temporary by the SDK. 
+ errWriteConnectionResetStub = &tempNetworkError{ + isTemp: false, op: "write", msg: "connection reset", + } + + // net.OpError write for broken pipe may not be temporary, but is treaded as + // temporary by the SDK. + errWriteBrokenPipeStub = &tempNetworkError{ + isTemp: false, op: "write", msg: "broken pipe", + } + + // Generic connection reset error + errConnectionResetStub = errors.New("connection reset") +) + type testData struct { Data string } @@ -47,7 +82,6 @@ func unmarshal(req *request.Request) { if req.Data != nil { json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) } - return } func unmarshalError(req *request.Request) { @@ -105,7 +139,7 @@ func TestRequestRecoverRetry5xx(t *testing.T) { if err != nil { t.Fatalf("expect no error, but got %v", err) } - if e, a := 2, int(r.RetryCount); e != a { + if e, a := 2, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if e, a := "valid", out.Data; e != a { @@ -138,7 +172,7 @@ func TestRequestRecoverRetry4xxRetryable(t *testing.T) { if err != nil { t.Fatalf("expect no error, but got %v", err) } - if e, a := 3, int(r.RetryCount); e != a { + if e, a := 3, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if e, a := "valid", out.Data; e != a { @@ -148,13 +182,18 @@ func TestRequestRecoverRetry4xxRetryable(t *testing.T) { // test that retries don't occur for 4xx status codes with a response type that can't be retried func TestRequest4xxUnretryable(t *testing.T) { - s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s := awstesting.NewClient(&aws.Config{ + MaxRetries: aws.Int(1), + }) s.Handlers.Validate.Clear() s.Handlers.Unmarshal.PushBack(unmarshal) s.Handlers.UnmarshalError.PushBack(unmarshalError) s.Handlers.Send.Clear() // mock sending s.Handlers.Send.PushBack(func(r *request.Request) { - r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} + r.HTTPResponse = 
&http.Response{ + StatusCode: 401, + Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`), + } }) out := &testData{} r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) @@ -172,7 +211,7 @@ func TestRequest4xxUnretryable(t *testing.T) { if e, a := "Signature does not match.", aerr.Message(); e != a { t.Errorf("expect %q error message, got %q", e, a) } - if e, a := 0, int(r.RetryCount); e != a { + if e, a := 0, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } } @@ -215,7 +254,7 @@ func TestRequestExhaustRetries(t *testing.T) { if e, a := "An error occurred.", aerr.Message(); e != a { t.Errorf("expect %q error message, got %q", e, a) } - if e, a := 3, int(r.RetryCount); e != a { + if e, a := 3, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } @@ -276,7 +315,7 @@ func TestRequestRecoverExpiredCreds(t *testing.T) { t.Errorf("Expect valid creds after cred expired recovery") } - if e, a := 1, int(r.RetryCount); e != a { + if e, a := 1, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if e, a := "valid", out.Data; e != a { @@ -306,22 +345,21 @@ func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) { } } -// disabled needs anonymous access / environment set credentials -//func TestRequestUserAgent(t *testing.T) { -// s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) -// -// req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) -// req.HTTPRequest.Header.Set("User-Agent", "foo/bar") -// if err := req.Build(); err != nil { -// t.Fatalf("expect no error, got %v", err) -// } -// -// expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", -// aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) -// if e, a := expectUA, req.HTTPRequest.Header.Get("User-Agent"); !strings.HasPrefix(a, e) { -// t.Errorf("expect %q user agent, got %q", e, a) -// } -//} +// func TestRequestUserAgent(t *testing.T) 
{ +// s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) + +// req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) +// req.HTTPRequest.Header.Set("User-Agent", "foo/bar") +// if err := req.Build(); err != nil { +// t.Fatalf("expect no error, got %v", err) +// } + +// expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", +// aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) +// if e, a := expectUA, req.HTTPRequest.Header.Get("User-Agent"); !strings.HasPrefix(a, e) { +// t.Errorf("expect %q user agent, got %q", e, a) +// } +// } func TestRequestThrottleRetries(t *testing.T) { delays := []time.Duration{} @@ -361,7 +399,7 @@ func TestRequestThrottleRetries(t *testing.T) { if e, a := "An error occurred.", aerr.Message(); e != a { t.Errorf("expect %q error message, got %q", e, a) } - if e, a := 3, int(r.RetryCount); e != a { + if e, a := 3, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } @@ -411,7 +449,7 @@ func TestRequestRecoverTimeoutWithNilBody(t *testing.T) { if err != nil { t.Fatalf("expect no error, but got %v", err) } - if e, a := 1, int(r.RetryCount); e != a { + if e, a := 1, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if e, a := "valid", out.Data; e != a { @@ -454,7 +492,7 @@ func TestRequestRecoverTimeoutWithNilResponse(t *testing.T) { if err != nil { t.Fatalf("expect no error, but got %v", err) } - if e, a := 1, int(r.RetryCount); e != a { + if e, a := 1, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if e, a := "valid", out.Data; e != a { @@ -548,7 +586,7 @@ func TestIsSerializationErrorRetryable(t *testing.T) { Error: c.err, } if r.IsErrorRetryable() != c.expected { - t.Errorf("Case %d: Expected %v, but received %v", i+1, c.expected, !c.expected) + t.Errorf("Case %d: Expected %v, but received %v", i, c.expected, !c.expected) } } } @@ -614,85 +652,6 @@ func TestWithGetResponseHeaders(t *testing.T) { } } 
-type connResetCloser struct { - Err error -} - -func (rc *connResetCloser) Read(b []byte) (int, error) { - return 0, rc.Err -} - -func (rc *connResetCloser) Close() error { - return nil -} - -func TestSerializationErrConnectionReset_accept(t *testing.T) { - count := 0 - handlers := request.Handlers{} - handlers.Send.PushBack(func(r *request.Request) { - count++ - r.HTTPResponse = &http.Response{} - r.HTTPResponse.Body = &connResetCloser{ - Err: errAcceptConnectionResetStub, - } - }) - - handlers.Sign.PushBackNamed(v4.SignRequestHandler) - handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - - op := &request.Operation{ - Name: "op", - HTTPMethod: "POST", - HTTPPath: "/", - } - - meta := metadata.ClientInfo{ - ServiceName: "fooService", - SigningName: "foo", - SigningRegion: "foo", - Endpoint: "localhost", - APIVersion: "2001-01-01", - JSONVersion: "1.1", - TargetPrefix: "Foo", - } - cfg := unit.Session.Config.Copy() - cfg.MaxRetries = aws.Int(5) - - req := request.New( - *cfg, - meta, - handlers, - client.DefaultRetryer{NumMaxRetries: 5}, - op, - &struct { - }{}, - &struct { - }{}, - ) - - osErr := errAcceptConnectionResetStub - req.ApplyOptions(request.WithResponseReadTimeout(time.Second)) - err := req.Send() - if err == nil { - t.Error("Expected rror 'SerializationError', but received nil") - } - if aerr, ok := err.(awserr.Error); ok && aerr.Code() != "SerializationError" { - t.Errorf("Expected 'SerializationError', but received %q", aerr.Code()) - } else if !ok { - t.Errorf("Expected 'awserr.Error', but received %v", reflect.TypeOf(err)) - } else if aerr.OrigErr().Error() != osErr.Error() { - t.Errorf("Expected %q, but received %q", osErr.Error(), aerr.OrigErr().Error()) - } - - if count != 6 { - 
t.Errorf("Expected '6', but received %d", count) - } -} - type testRetryer struct { shouldRetry bool } @@ -720,7 +679,7 @@ func (d *testRetryer) ShouldRetry(r *request.Request) bool { func TestEnforceShouldRetryCheck(t *testing.T) { tp := &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: http.ProxyFromEnvironment, ResponseHeaderTimeout: 1 * time.Millisecond, } @@ -753,7 +712,7 @@ func TestEnforceShouldRetryCheck(t *testing.T) { if err == nil { t.Fatalf("expect error, but got nil") } - if e, a := 3, int(r.RetryCount); e != a { + if e, a := 3, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } if !retryer.shouldRetry { @@ -1122,7 +1081,7 @@ func Test501NotRetrying(t *testing.T) { if e, a := "NotImplemented", aerr.Code(); e != a { t.Errorf("expected error code %q, but received %q", e, a) } - if e, a := 1, int(r.RetryCount); e != a { + if e, a := 1, r.RetryCount; e != a { t.Errorf("expect %d retry count, got %d", e, a) } } @@ -1148,11 +1107,52 @@ func TestRequestNoConnection(t *testing.T) { t.Fatal("expect error, but got none") } + t.Logf("Error, %v", err) + awsError := err.(awserr.Error) + origError := awsError.OrigErr() + t.Logf("Orig Error: %#v of type %T", origError, origError) + if e, a := 10, r.RetryCount; e != a { t.Errorf("expect %v retry count, got %v", e, a) } } +func TestRequestBodySeekFails(t *testing.T) { + s := awstesting.NewClient() + s.Handlers.Validate.Clear() + s.Handlers.Build.Clear() + + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + r.SetReaderBody(&stubSeekFail{ + Err: fmt.Errorf("failed to seek reader"), + }) + err := r.Send() + if err == nil { + t.Fatal("expect error, but got none") + } + + aerr := err.(awserr.Error) + if e, a := request.ErrCodeSerialization, aerr.Code(); e != a { + t.Errorf("expect %v error code, got %v", e, a) + } + +} + +type stubSeekFail struct { + Err error +} + +func (f *stubSeekFail) Read(b []byte) (int, error) { + return len(b), nil +} +func (f 
*stubSeekFail) ReadAt(b []byte, offset int64) (int, error) { + return len(b), nil +} +func (f *stubSeekFail) Seek(offset int64, mode int) (int64, error) { + return 0, f.Err +} + func getFreePort() (int, error) { l, err := net.Listen("tcp", ":0") if err != nil { diff --git a/aws/request/retryer.go b/aws/request/retryer.go index 2c5b1ef0..0236dd4c 100644 --- a/aws/request/retryer.go +++ b/aws/request/retryer.go @@ -1,23 +1,41 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/IBM/ibm-cos-sdk-go/aws" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. 
+// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { cfg.Retryer = retryer return cfg @@ -108,32 +126,87 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry := shouldRetryError(origErr) + if err.Code() == "RequestError" && !shouldRetry { + return false + } } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. 
+ return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -143,17 +216,47 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if r.Error == nil { + return false + } + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
// // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case 429, 502, 503, 504: + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/aws/session/credentials.go b/aws/session/credentials.go new file mode 100644 index 00000000..73ccd6e3 --- /dev/null +++ b/aws/session/credentials.go @@ -0,0 +1,124 @@ +package session + +import ( + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds" + + //"github.com/IBM/ibm-cos-sdk-go/aws/credentials/stscreds" + + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + default: + // Fallback to the "default" credential resolution chain. 
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. 
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + // defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEnvironment = "Environment" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/aws/session/custom_ca_bundle_test.go b/aws/session/custom_ca_bundle_test.go index acbff56c..720e7177 100644 --- a/aws/session/custom_ca_bundle_test.go +++ b/aws/session/custom_ca_bundle_test.go @@ -52,8 +52,8 @@ func skipTravisTest(t *testing.T) { func TestNewSession_WithCustomCABundle_Env(t *testing.T) { skipTravisTest(t) - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() endpoint, err := awstesting.CreateTLSServer(TLSBundleCertFile, TLSBundleKeyFile, nil) if err != nil { @@ -86,8 +86,8 @@ func TestNewSession_WithCustomCABundle_Env(t *testing.T) { } func TestNewSession_WithCustomCABundle_EnvNotExists(t *testing.T) 
{ - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_CA_BUNDLE", "file-not-exists") @@ -106,8 +106,8 @@ func TestNewSession_WithCustomCABundle_EnvNotExists(t *testing.T) { func TestNewSession_WithCustomCABundle_Option(t *testing.T) { skipTravisTest(t) - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() endpoint, err := awstesting.CreateTLSServer(TLSBundleCertFile, TLSBundleKeyFile, nil) if err != nil { @@ -143,8 +143,8 @@ func TestNewSession_WithCustomCABundle_Option(t *testing.T) { func TestNewSession_WithCustomCABundle_HTTPProxyAvailable(t *testing.T) { skipTravisTest(t) - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() s, err := NewSessionWithOptions(Options{ Config: aws.Config{ @@ -173,8 +173,8 @@ func TestNewSession_WithCustomCABundle_HTTPProxyAvailable(t *testing.T) { func TestNewSession_WithCustomCABundle_OptionPriority(t *testing.T) { skipTravisTest(t) - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() endpoint, err := awstesting.CreateTLSServer(TLSBundleCertFile, TLSBundleKeyFile, nil) if err != nil { @@ -216,8 +216,8 @@ func (m *mockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { } func TestNewSession_WithCustomCABundle_UnsupportedTransport(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() s, err := NewSessionWithOptions(Options{ Config: aws.Config{ @@ -245,8 +245,8 @@ func TestNewSession_WithCustomCABundle_UnsupportedTransport(t *testing.T) { func TestNewSession_WithCustomCABundle_TransportSet(t *testing.T) { skipTravisTest(t) - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := 
initSessionTestEnv() + defer restoreEnvFn() endpoint, err := awstesting.CreateTLSServer(TLSBundleCertFile, TLSBundleKeyFile, nil) if err != nil { diff --git a/aws/session/doc.go b/aws/session/doc.go index 930c483a..51a2b4e2 100644 --- a/aws/session/doc.go +++ b/aws/session/doc.go @@ -1,97 +1,92 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. 
Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. +Sessions options from Shared Config By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. -Create a Session with the default config and request handlers. With credentials -region, and profile loaded from the environment and shared config automatically. -Requires the AWS_PROFILE to be set, or "default" is used. +Credential and config loading order - // Create Session - sess := session.Must(session.NewSession()) +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: - // Create a Session with a custom region - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - })) + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) - // Create a S3 client instance from a session - sess := session.Must(session.NewSession()) +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. 
To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() - svc := s3.New(sess) -Create Session With Option Overrides +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. -In addition to NewSession, Sessions can be created using NewSessionWithOptions. -This func allows you to control and override how the Session will be created -through code instead of being driven by environment variables only. + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) -Use NewSessionWithOptions when you want to provide the config profile, or -override the shared config state (AWS_SDK_LOAD_CONFIG). +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). 
// Equivalent to session.NewSession() - sess := session.Must(session.NewSessionWithOptions(session.Options{ + sess, err := session.NewSessionWithOptions(session.Options{ // Options - })) + }) - // Specify profile to load for the session's config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Profile: "profile_name", - })) + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", - // Specify profile for config and region for requests - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Region: aws.String("us-east-1")}, - Profile: "profile_name", - })) + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, - // Force enable Shared Config support - sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Force enable Shared Config support SharedConfigState: session.SharedConfigEnable, - })) + }) Adding Handlers -You can add handlers to a session for processing HTTP requests. All service -clients that use the session inherit the handlers. For example, the following -handler logs every request and its payload made by a service client: +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. 
@@ -99,22 +94,15 @@ handler logs every request and its payload made by a service client: sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload - logger.Printf("Request: %s/%s, Payload: %s", + logger.Printf("Request: %s/%s, Params: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) }) -Deprecated "New" function - -The New session function has been deprecated because it does not provide good -way to return errors that occur when loading the configuration files and values. -Because of this, NewSession was created so errors can be retrieved when -creating a session fails. - Shared Config Fields -By default the SDK will only load the shared credentials file's (~/.aws/credentials) -credentials values, and all other config is provided by the environment variables, -SDK defaults, and user provided aws.Config values. +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable option is used to create the Session the full shared config values will be @@ -125,67 +113,23 @@ files have the same format. If both config files are present the configuration from both files will be read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). -Credentials are the values the SDK should use for authenticating requests with -AWS Services. They are from a configuration file will need to include both -aws_access_key_id and aws_secret_access_key must be provided together in the -same file to be considered valid. The values will be ignored if not a complete -group. 
aws_session_token is an optional field that can be provided if both of -the other two fields are also provided. +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. aws_access_key_id = AKID aws_secret_access_key = SECRET aws_session_token = TOKEN -Assume Role values allow you to configure the SDK to assume an IAM role using -a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK supports assuming -a role with MFA token if the session option AssumeRoleTokenProvider -is set. - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - -Region is the region the SDK should use for looking up AWS service endpoints -and signing requests. - + ; region only supported if SharedConfigEnabled. region = us-east-1 -Assume Role with MFA token - -To create a session with support for assuming an IAM role with MFA set the -session option AssumeRoleTokenProvider to a function that will prompt for the -MFA token code when the SDK assumes the role and refreshes the role's credentials. -This allows you to configure the SDK via the shared config to assumea role -with MFA tokens. - -In order for the SDK to assume a role with MFA the SharedConfigState -session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG -environment variable set. - -The shared configuration instructs the SDK to assume an IAM role with MFA -when the mfa_serial configuration field is set in the shared config -(~/.aws/config) or shared credentials (~/.aws/credentials) file. 
- -If mfa_serial is set in the configuration, the SDK will assume the role, and -the AssumeRoleTokenProvider session option is not set an an error will -be returned when creating the session. - - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess) - -To setup assume role outside of a session see the stscreds.AssumeRoleProvider -documentation. - Environment Variables When a Session is created several environment variables can be set to adjust @@ -195,7 +139,7 @@ require multiple of the values to set or the partial values will be ignored. All environment variable values are strings unless otherwise noted. Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session token and optionally also be provided, but is +Key must be provided. Session Token and optionally also be provided, but is not required. # Access Key ID @@ -206,7 +150,7 @@ not required. AWS_SECRET_ACCESS_KEY=SECRET AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - # Session token + # Session Token AWS_SESSION_TOKEN=TOKEN Region value will instruct the SDK where to make service API requests to. If is diff --git a/aws/session/env_config.go b/aws/session/env_config.go index 4f34d3b7..5e84bf80 100644 --- a/aws/session/env_config.go +++ b/aws/session/env_config.go @@ -98,11 +98,6 @@ type envConfig struct { // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string - csmEnabled string - CSMEnabled bool - CSMPort string - CSMClientID string - enableEndpointDiscovery string // Enables endpoint discovery via environment variables. 
// @@ -111,15 +106,6 @@ type envConfig struct { } var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -178,15 +164,16 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { diff --git a/aws/session/env_config_test.go b/aws/session/env_config_test.go index b3068a72..a8f2b611 100644 --- a/aws/session/env_config_test.go +++ b/aws/session/env_config_test.go @@ -3,10 +3,12 @@ package session import ( "os" "reflect" + "strconv" "testing" "github.com/IBM/ibm-cos-sdk-go/aws/credentials" "github.com/IBM/ibm-cos-sdk-go/awstesting" + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults" ) @@ -75,24 +77,27 @@ func TestLoadEnvConfig_Creds(t *testing.T) { }, } - for _, c := range cases { - os.Clearenv() + for i, c := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + restoreEnvFn := sdktesting.StashEnv() + 
defer restoreEnvFn() + for k, v := range c.Env { + os.Setenv(k, v) + } - for k, v := range c.Env { - os.Setenv(k, v) - } + cfg := loadEnvConfig() + if !reflect.DeepEqual(c.Val, cfg.Creds) { + t.Errorf("expect credentials to match.\n%s", + awstesting.SprintExpectActual(c.Val, cfg.Creds)) + } + }) - cfg := loadEnvConfig() - if !reflect.DeepEqual(c.Val, cfg.Creds) { - t.Errorf("expect credentials to match.\n%s", - awstesting.SprintExpectActual(c.Val, cfg.Creds)) - } } } func TestLoadEnvConfig(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() cases := []struct { Env map[string]string @@ -266,30 +271,32 @@ func TestLoadEnvConfig(t *testing.T) { }, } - for _, c := range cases { - os.Clearenv() - - for k, v := range c.Env { - os.Setenv(k, v) - } + for i, c := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + restoreEnvFn = sdktesting.StashEnv() + defer restoreEnvFn() + for k, v := range c.Env { + os.Setenv(k, v) + } - var cfg envConfig - if c.UseSharedConfigCall { - cfg = loadSharedEnvConfig() - } else { - cfg = loadEnvConfig() - } + var cfg envConfig + if c.UseSharedConfigCall { + cfg = loadSharedEnvConfig() + } else { + cfg = loadEnvConfig() + } - if !reflect.DeepEqual(c.Config, cfg) { - t.Errorf("expect config to match.\n%s", - awstesting.SprintExpectActual(c.Config, cfg)) - } + if !reflect.DeepEqual(c.Config, cfg) { + t.Errorf("expect config to match.\n%s", + awstesting.SprintExpectActual(c.Config, cfg)) + } + }) } } func TestSetEnvValue(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("empty_key", "") os.Setenv("second_key", "2") diff --git a/aws/session/session.go b/aws/session/session.go index 4291471b..8d5993c5 100644 --- a/aws/session/session.go +++ b/aws/session/session.go @@ -3,7 +3,6 @@ package session import ( "crypto/tls" "crypto/x509" - "fmt" "io" 
"io/ioutil" "net/http" @@ -15,7 +14,6 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers" "github.com/IBM/ibm-cos-sdk-go/aws/credentials" "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam" - "github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds" "github.com/IBM/ibm-cos-sdk-go/aws/defaults" "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" "github.com/IBM/ibm-cos-sdk-go/aws/request" @@ -222,6 +220,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -261,7 +265,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg = loadEnvConfig() } - if len(opts.Profile) > 0 { + if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } @@ -342,12 +346,16 @@ func Must(sess *Session, err error) *Session { func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - handlers := defaults.Handlers() + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. 
@@ -364,9 +372,11 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - return nil, err + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -438,7 +448,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Merge in user provided configuration cfg.MergeIn(userCfg) @@ -459,99 +473,17 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set + // Configure credentials if not already set by the user when creating the + // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { if iBmIamCreds := getIBMIAMCredentials(userCfg); iBmIamCreds != nil { cfg.Credentials = iBmIamCreds } else { - // inspect the profile to see if a credential source has been specified. - if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { - - // if both credential_source and source_profile have been set, return an error - // as this is undefined behavior. 
- if len(sharedCfg.AssumeRole.SourceProfile) > 0 { - return ErrSharedConfigSourceCollision - } - - // valid credential source values - const ( - //credSourceEc2Metadata= "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - //credSourceECSContainer= "EcsContainer" - ) - - switch sharedCfg.AssumeRole.CredentialSource { - //case credSourceEc2Metadata: - // cfgCp := *cfg - // p := defaults.RemoteCredProvider(cfgCp, handlers) - // cfgCp.Credentials = credentials.NewCredentials(p) - // - // if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // // AssumeRole Token provider is required if doing Assume Role - // // with MFA. - // return AssumeRoleTokenProviderNotSetError{} - // } - // - // cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - case credSourceEnvironment: - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - //case credSourceECSContainer: - // if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - // return ErrSharedConfigECSContainerEnvVarEmpty - // } - // - // cfgCp := *cfg - // p := defaults.RemoteCredProvider(cfgCp, handlers) - // creds := credentials.NewCredentials(p) - // - // cfg.Credentials = creds - default: - return ErrSharedConfigInvalidCredSource - } - - return nil - } - - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. 
- return AssumeRoleTokenProviderNotSetError{} - } - - //cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else if len(sharedCfg.CredentialProcess) > 0 { - cfg.Credentials = processcreds.NewCredentials( - sharedCfg.CredentialProcess, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. - cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - //defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err } + cfg.Credentials = creds } } @@ -575,44 +507,6 @@ func getIBMIAMCredentials(config *aws.Config) *credentials.Credentials { return nil } -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. 
-func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} - func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) diff --git a/aws/session/session_test.go b/aws/session/session_test.go index cda3331c..01ef9d75 100644 --- a/aws/session/session_test.go +++ b/aws/session/session_test.go @@ -12,12 +12,11 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws/credentials" "github.com/IBM/ibm-cos-sdk-go/aws/defaults" "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" - "github.com/IBM/ibm-cos-sdk-go/awstesting" ) func TestNewDefaultSession(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() s, _ := NewSession(&aws.Config{Region: aws.String("region")}) @@ -36,9 +35,8 @@ func TestNewDefaultSession(t *testing.T) { } func TestNew_WithCustomCreds(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() customCreds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN") s, _ := NewSession(&aws.Config{Credentials: customCreds}) @@ -55,43 +53,9 @@ func (w mockLogger) Log(args ...interface{}) { fmt.Fprintln(w, args...) 
} -//// SharedConfigAssumeRoleError: -//// failed to load assume role for assume_role_invalid_source_profile_role_arn, -//// source profile has no shared credentials -//func TestNew_WithSessionLoadError(t *testing.T) { -// oldEnv := initSessionTestEnv() -// defer awstesting.PopEnv(oldEnv) -// -// os.Setenv("AWS_SDK_LOAD_CONFIG", "1") -// os.Setenv("AWS_CONFIG_FILE", testConfigFilename) -// os.Setenv("AWS_PROFILE", "assume_role_invalid_source_profile") -// -// logger := bytes.Buffer{} -// s, err := NewSession(&aws.Config{Logger: &mockLogger{&logger}}) -// -// if s == nil { -// t.Errorf("expect not nil") -// } -// -// svc := s3.New(s) -// _, err = svc.ListBuckets(&s3.ListBucketsInput{}) -// -// if err == nil { -// t.Errorf("expect not nil") -// } -// if e, a := "ERROR: failed to create session with AWS_SDK_LOAD_CONFIG enabled", logger.String(); !strings.Contains(a, e) { -// t.Errorf("expect %v, to contain %v", e, a) -// } -// if e, a := (SharedConfigAssumeRoleError{ -// RoleARN: "assume_role_invalid_source_profile_role_arn", -// }).Error(), err.Error(); !strings.Contains(a, e) { -// t.Errorf("expect %v, to contain %v", e, a) -// } -//} - func TestSessionCopy(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_REGION", "orig_region") @@ -147,10 +111,10 @@ func TestSessionClientConfig(t *testing.T) { } func TestNewSession_NoCredentials(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() s, err := NewSession() + if err != nil { t.Errorf("expect nil, %v", err) } @@ -164,8 +128,8 @@ func TestNewSession_NoCredentials(t *testing.T) { } func TestNewSessionWithOptions_OverrideProfile(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") 
os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) @@ -196,13 +160,13 @@ func TestNewSessionWithOptions_OverrideProfile(t *testing.T) { t.Errorf("expect empty, got %v", v) } if e, a := "SharedConfigCredentials", creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) + t.Errorf("expect %v, to be in %v", e, a) } } func TestNewSessionWithOptions_OverrideSharedConfigEnable(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "0") os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) @@ -233,13 +197,13 @@ func TestNewSessionWithOptions_OverrideSharedConfigEnable(t *testing.T) { t.Errorf("expect empty, got %v", v) } if e, a := "SharedConfigCredentials", creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) + t.Errorf("expect %v, to be in %v", e, a) } } func TestNewSessionWithOptions_OverrideSharedConfigDisable(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) @@ -270,13 +234,13 @@ func TestNewSessionWithOptions_OverrideSharedConfigDisable(t *testing.T) { t.Errorf("expect empty, got %v", v) } if e, a := "SharedConfigCredentials", creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) + t.Errorf("expect %v, to be in %v", e, a) } } func TestNewSessionWithOptions_OverrideSharedConfigFiles(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() os.Setenv("AWS_SDK_LOAD_CONFIG", "1") os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) @@ -307,18 +271,18 @@ func TestNewSessionWithOptions_OverrideSharedConfigFiles(t *testing.T) { t.Errorf("expect 
empty, got %v", v) } if e, a := "SharedConfigCredentials", creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) + t.Errorf("expect %v, to be in %v", e, a) } } func TestNewSessionWithOptions_Overrides(t *testing.T) { - cases := []struct { + cases := map[string]struct { InEnvs map[string]string InProfile string OutRegion string OutCreds credentials.Value }{ - { + "env profile with opt profile": { InEnvs: map[string]string{ "AWS_SDK_LOAD_CONFIG": "0", "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename, @@ -332,7 +296,7 @@ func TestNewSessionWithOptions_Overrides(t *testing.T) { ProviderName: "SharedConfigCredentials", }, }, - { + "env creds with env profile": { InEnvs: map[string]string{ "AWS_SDK_LOAD_CONFIG": "0", "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename, @@ -341,7 +305,6 @@ func TestNewSessionWithOptions_Overrides(t *testing.T) { "AWS_SECRET_ACCESS_KEY": "env_secret", "AWS_PROFILE": "other_profile", }, - InProfile: "full_profile", OutRegion: "env_region", OutCreds: credentials.Value{ AccessKeyID: "env_akid", @@ -349,12 +312,29 @@ func TestNewSessionWithOptions_Overrides(t *testing.T) { ProviderName: "EnvConfigCredentials", }, }, - { + "env creds with opt profile": { + InEnvs: map[string]string{ + "AWS_SDK_LOAD_CONFIG": "0", + "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename, + "AWS_REGION": "env_region", + "AWS_ACCESS_KEY": "env_akid", + "AWS_SECRET_ACCESS_KEY": "env_secret", + "AWS_PROFILE": "other_profile", + }, + InProfile: "full_profile", + OutRegion: "env_region", + OutCreds: credentials.Value{ + AccessKeyID: "full_profile_akid", + SecretAccessKey: "full_profile_secret", + ProviderName: "SharedConfigCredentials", + }, + }, + "cfg and cred file with opt profile": { InEnvs: map[string]string{ "AWS_SDK_LOAD_CONFIG": "0", "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename, "AWS_CONFIG_FILE": testConfigOtherFilename, - "AWS_PROFILE": "shared_profile", + "AWS_PROFILE": "other_profile", }, InProfile: 
"config_file_load_order", OutRegion: "shared_config_region", @@ -366,360 +346,42 @@ func TestNewSessionWithOptions_Overrides(t *testing.T) { }, } - for _, c := range cases { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - - for k, v := range c.InEnvs { - os.Setenv(k, v) - } - - s, err := NewSessionWithOptions(Options{ - Profile: c.InProfile, - SharedConfigState: SharedConfigEnable, + for name, c := range cases { + t.Run(name, func(t *testing.T) { + restoreEnvFn := initSessionTestEnv() + defer restoreEnvFn() + + for k, v := range c.InEnvs { + os.Setenv(k, v) + } + + s, err := NewSessionWithOptions(Options{ + Profile: c.InProfile, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + creds, err := s.Config.Credentials.Get() + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + if e, a := c.OutRegion, *s.Config.Region; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := c.OutCreds.AccessKeyID, creds.AccessKeyID; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := c.OutCreds.SecretAccessKey, creds.SecretAccessKey; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := c.OutCreds.SessionToken, creds.SessionToken; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := c.OutCreds.ProviderName, creds.ProviderName; !strings.Contains(a, e) { + t.Errorf("expect %v, to be in %v", e, a) + } }) - if err != nil { - t.Errorf("expect nil, %v", err) - } - - creds, err := s.Config.Credentials.Get() - if err != nil { - t.Errorf("expect nil, %v", err) - } - if e, a := c.OutRegion, *s.Config.Region; e != a { - t.Errorf("expect %v, got %v", e, a) - } - if e, a := c.OutCreds.AccessKeyID, creds.AccessKeyID; e != a { - t.Errorf("expect %v, got %v", e, a) - } - if e, a := c.OutCreds.SecretAccessKey, creds.SecretAccessKey; e != a { - t.Errorf("expect %v, got %v", e, a) - } - if e, a := c.OutCreds.SessionToken, creds.SessionToken; e != a { - t.Errorf("expect 
%v, got %v", e, a) - } - if e, a := c.OutCreds.ProviderName, creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) - } - } -} - -const assumeRoleRespMsg = ` - - - - arn:aws:sts::account_id:assumed-role/role/session_name - AKID:session_name - - - AKID - SECRET - SESSION_TOKEN - %s - - - - request-id - - -` - -func TestSessionAssumeRole_WithMFA_NoTokenProvider(t *testing.T) { - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - - os.Setenv("AWS_REGION", "us-east-1") - os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) - os.Setenv("AWS_PROFILE", "assume_role_w_creds") - - _, err := NewSessionWithOptions(Options{ - Profile: "assume_role_w_mfa", - SharedConfigState: SharedConfigEnable, - }) - if e, a := (AssumeRoleTokenProviderNotSetError{}), err; e != a { - t.Errorf("expect %v, got %v", e, a) - } -} - -func TestSessionAssumeRole_DisableSharedConfig(t *testing.T) { - // Backwards compatibility with Shared config disabled - // assume role should not be built into the config. 
- oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - - os.Setenv("AWS_SDK_LOAD_CONFIG", "0") - os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) - os.Setenv("AWS_PROFILE", "assume_role_w_creds") - - s, err := NewSession() - if err != nil { - t.Errorf("expect nil, %v", err) - } - - creds, err := s.Config.Credentials.Get() - if err != nil { - t.Errorf("expect nil, %v", err) - } - if e, a := "assume_role_w_creds_akid", creds.AccessKeyID; e != a { - t.Errorf("expect %v, got %v", e, a) - } - if e, a := "assume_role_w_creds_secret", creds.SecretAccessKey; e != a { - t.Errorf("expect %v, got %v", e, a) - } - if e, a := "SharedConfigCredentials", creds.ProviderName; !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) } } - -func TestSessionAssumeRole_InvalidSourceProfile(t *testing.T) { - // Backwards compatibility with Shared config disabled - // assume role should not be built into the config. - oldEnv := initSessionTestEnv() - defer awstesting.PopEnv(oldEnv) - - os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename) - os.Setenv("AWS_PROFILE", "assume_role_invalid_source_profile") - - s, err := NewSession() - if err == nil { - t.Errorf("expect error") - } - if e, a := "SharedConfigAssumeRoleError: failed to load assume role", err.Error(); !strings.Contains(a, e) { - t.Errorf("expect %v, to contain %v", e, a) - } - if s != nil { - t.Errorf("expect nil, %v", err) - } -} - -func TestSharedConfigCredentialSource(t *testing.T) { - cases := []struct { - name string - profile string - expectedError error - expectedAccessKey string - expectedSecretKey string - init func(*aws.Config, string) func() error - }{ - { - name: "env var credential source", - profile: "env_var_credential_source", - expectedAccessKey: "access_key", - expectedSecretKey: "secret_key", - init: func(cfg *aws.Config, profile string) func() error { - os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - os.Setenv("AWS_CONFIG_FILE", 
"testdata/credential_source_config") - os.Setenv("AWS_PROFILE", profile) - os.Setenv("AWS_ACCESS_KEY", "access_key") - os.Setenv("AWS_SECRET_KEY", "secret_key") - - return func() error { - os.Unsetenv("AWS_SDK_LOAD_CONFIG") - os.Unsetenv("AWS_CONFIG_FILE") - os.Unsetenv("AWS_PROFILE") - os.Unsetenv("AWS_ACCESS_KEY") - os.Unsetenv("AWS_SECRET_KEY") - - return nil - } - }, - }, - { - name: "credential source and source profile", - profile: "invalid_source_and_credential_source", - expectedError: ErrSharedConfigSourceCollision, - init: func(cfg *aws.Config, profile string) func() error { - os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - os.Setenv("AWS_CONFIG_FILE", "testdata/credential_source_config") - os.Setenv("AWS_PROFILE", profile) - os.Setenv("AWS_ACCESS_KEY", "access_key") - os.Setenv("AWS_SECRET_KEY", "secret_key") - - return func() error { - os.Unsetenv("AWS_SDK_LOAD_CONFIG") - os.Unsetenv("AWS_CONFIG_FILE") - os.Unsetenv("AWS_PROFILE") - os.Unsetenv("AWS_ACCESS_KEY") - os.Unsetenv("AWS_SECRET_KEY") - - return nil - } - }, - }, - // { - // name: "ec2metadata credential source", - // profile: "ec2metadata", - // expectedAccessKey: "AKID", - // expectedSecretKey: "SECRET", - // init: func(cfg *aws.Config, profile string) func() error { - // os.Setenv("AWS_REGION", "us-east-1") - // os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - // os.Setenv("AWS_CONFIG_FILE", "testdata/credential_source_config") - // os.Setenv("AWS_PROFILE", "ec2metadata") - // - // const ec2MetadataResponse = `{ - // "Code": "Success", - // "Type": "AWS-HMAC", - // "AccessKeyId" : "access-key", - // "SecretAccessKey" : "secret-key", - // "Token" : "token", - // "Expiration" : "2100-01-01T00:00:00Z", - // "LastUpdated" : "2009-11-23T0:00:00Z" - //}` - // - // ec2MetadataCalled := false - // ec2MetadataServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // if r.URL.Path == "/meta-data/iam/security-credentials/RoleName" { - // ec2MetadataCalled = true - // 
w.Write([]byte(ec2MetadataResponse)) - // } else if r.URL.Path == "/meta-data/iam/security-credentials/" { - // w.Write([]byte("RoleName")) - // } else { - // w.Write([]byte("")) - // } - // })) - // - // stsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // w.Write([]byte(fmt.Sprintf(assumeRoleRespMsg, time.Now().Add(15*time.Minute).Format("2006-01-02T15:04:05Z")))) - // })) - // - // cfg.EndpointResolver = endpoints.ResolverFunc( - // func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { - // if service == "ec2metadata" { - // return endpoints.ResolvedEndpoint{ - // URL: ec2MetadataServer.URL, - // }, nil - // } - // - // return endpoints.ResolvedEndpoint{ - // URL: stsServer.URL, - // }, nil - // }, - // ) - // - // return func() error { - // os.Unsetenv("AWS_SDK_LOAD_CONFIG") - // os.Unsetenv("AWS_CONFIG_FILE") - // os.Unsetenv("AWS_PROFILE") - // os.Unsetenv("AWS_REGION") - // - // ec2MetadataServer.Close() - // stsServer.Close() - // - // if !ec2MetadataCalled { - // return fmt.Errorf("expected ec2metadata to be called") - // } - // - // return nil - // } - // }, - // }, - // { - // name: "ecs container credential source", - // profile: "ecscontainer", - // expectedAccessKey: "access-key", - // expectedSecretKey: "secret-key", - // init: func(cfg *aws.Config, profile string) func() error { - // os.Setenv("AWS_REGION", "us-east-1") - // os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - // os.Setenv("AWS_CONFIG_FILE", "testdata/credential_source_config") - // os.Setenv("AWS_PROFILE", "ecscontainer") - // os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/ECS") - // - // const ecsResponse = `{ - // "Code": "Success", - // "Type": "AWS-HMAC", - // "AccessKeyId" : "access-key", - // "SecretAccessKey" : "secret-key", - // "Token" : "token", - // "Expiration" : "2100-01-01T00:00:00Z", - // "LastUpdated" : "2009-11-23T0:00:00Z" - //}` - // - // ecsCredsCalled := false - // 
ecsMetadataServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // if r.URL.Path == "/ECS" { - // ecsCredsCalled = true - // w.Write([]byte(ecsResponse)) - // } else { - // w.Write([]byte("")) - // } - // })) - // - // shareddefaults.ECSContainerCredentialsURI = ecsMetadataServer.URL - // - // stsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // w.Write([]byte(fmt.Sprintf(assumeRoleRespMsg, time.Now().Add(15*time.Minute).Format("2006-01-02T15:04:05Z")))) - // })) - // - // cfg.Endpoint = aws.String(stsServer.URL) - // - // cfg.EndpointResolver = endpoints.ResolverFunc( - // func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { - // fmt.Println("SERVICE", service) - // return endpoints.ResolvedEndpoint{ - // URL: stsServer.URL, - // }, nil - // }, - // ) - // - // return func() error { - // os.Unsetenv("AWS_SDK_LOAD_CONFIG") - // os.Unsetenv("AWS_CONFIG_FILE") - // os.Unsetenv("AWS_PROFILE") - // os.Unsetenv("AWS_REGION") - // os.Unsetenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") - // - // ecsMetadataServer.Close() - // stsServer.Close() - // - // if !ecsCredsCalled { - // return fmt.Errorf("expected ec2metadata to be called") - // } - // - // return nil - // } - // }, - // }, - } - - for _, c := range cases { - cfg := &aws.Config{} - clean := c.init(cfg, c.profile) - sess, err := NewSession(cfg) - if e, a := c.expectedError, err; e != a { - t.Errorf("expected %v, but received %v", e, a) - } - - if c.expectedError != nil { - continue - } - - creds, err := sess.Config.Credentials.Get() - if err != nil { - t.Errorf("expected no error, but received %v", err) - } - - if e, a := c.expectedAccessKey, creds.AccessKeyID; e != a { - t.Errorf("expected %v, but received %v", e, a) - } - - if e, a := c.expectedSecretKey, creds.SecretAccessKey; e != a { - t.Errorf("expected %v, but received %v", e, a) - } - - if err := clean(); err != nil { - 
t.Errorf("expected no error, but received %v", err) - } - } -} - -func initSessionTestEnv() (oldEnv []string) { - oldEnv = awstesting.StashEnv() - os.Setenv("AWS_CONFIG_FILE", "file_not_exists") - os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "file_not_exists") - - return oldEnv -} diff --git a/aws/session/shared_config.go b/aws/session/shared_config.go index f6a4760d..0d41cb51 100644 --- a/aws/session/shared_config.go +++ b/aws/session/shared_config.go @@ -28,6 +28,7 @@ const ( // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process credentialProcessKey = `credential_process` @@ -48,11 +49,11 @@ type assumeRoleConfig struct { // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. // // aws_access_key_id // aws_secret_access_key @@ -64,6 +65,10 @@ type sharedConfig struct { // An external process to request credentials CredentialProcess string + CredentialSource string + + SourceProfileName string + SourceProfile *sharedConfig // Region is the region the SDK should use for looking up AWS service endpoints // and signing requests. 
@@ -83,17 +88,19 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. +// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. -func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -104,16 +111,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -137,60 +139,59 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig - - if len(cfg.AssumeRole.CredentialSource) > 0 { - // setAssumeRoleSource is only called when source_profile is found. 
- // If both source_profile and credential_source are set, then - // ErrSharedConfigSourceCollision will be returned - return ErrSharedConfigSourceCollision - } - - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} - } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } return err } } - - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr } - cfg.AssumeRoleSource = &assumeRoleSrc + profiles[profile] = struct{}{} - return nil -} + if err := cfg.validateCredentialType(); err != nil { + return err + } -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue - } + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. 
+ cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { return err } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. // -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -200,32 +201,17 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e } } + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), } - - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - credentialSource := section.String(credentialSourceKey) - hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 - if len(roleArn) > 0 && hasSource { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - CredentialSource: credentialSource, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), - } + if creds.HasKeys() { + cfg.Creds = creds } // `credential_process` @@ -247,6 +233,62 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e return nil } +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.Creds = credentials.Value{} +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. 
type SharedConfigLoadError struct { Filename string diff --git a/aws/session/shared_config_test.go b/aws/session/shared_config_test.go index 19389527..2654835c 100644 --- a/aws/session/shared_config_test.go +++ b/aws/session/shared_config_test.go @@ -4,6 +4,7 @@ import ( "fmt" "path/filepath" "reflect" + "strconv" "strings" "testing" @@ -26,6 +27,7 @@ func TestLoadSharedConfig(t *testing.T) { { Filenames: []string{"file_not_exists"}, Profile: "default", + Expected: sharedConfig{}, }, { Filenames: []string{testConfigFilename}, @@ -57,91 +59,28 @@ func TestLoadSharedConfig(t *testing.T) { }, }, }, - { - Filenames: []string{testConfigOtherFilename, testConfigFilename}, - Profile: "assume_role", - Expected: sharedConfig{ - AssumeRole: assumeRoleConfig{ - RoleARN: "assume_role_role_arn", - SourceProfile: "complete_creds", - }, - AssumeRoleSource: &sharedConfig{ - Creds: credentials.Value{ - AccessKeyID: "complete_creds_akid", - SecretAccessKey: "complete_creds_secret", - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename), - }, - }, - }, - }, - { - Filenames: []string{testConfigOtherFilename, testConfigFilename}, - Profile: "assume_role_invalid_source_profile", - Expected: sharedConfig{ - AssumeRole: assumeRoleConfig{ - RoleARN: "assume_role_invalid_source_profile_role_arn", - SourceProfile: "profile_not_exists", - }, - }, - Err: SharedConfigAssumeRoleError{RoleARN: "assume_role_invalid_source_profile_role_arn"}, - }, - { - Filenames: []string{testConfigOtherFilename, testConfigFilename}, - Profile: "assume_role_w_creds", - Expected: sharedConfig{ - Creds: credentials.Value{ - AccessKeyID: "assume_role_w_creds_akid", - SecretAccessKey: "assume_role_w_creds_secret", - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename), - }, - AssumeRole: assumeRoleConfig{ - RoleARN: "assume_role_w_creds_role_arn", - SourceProfile: "assume_role_w_creds", - ExternalID: "1234", - RoleSessionName: "assume_role_w_creds_session_name", - }, - 
AssumeRoleSource: &sharedConfig{ - Creds: credentials.Value{ - AccessKeyID: "assume_role_w_creds_akid", - SecretAccessKey: "assume_role_w_creds_secret", - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename), - }, - }, - }, - }, - { - Filenames: []string{testConfigOtherFilename, testConfigFilename}, - Profile: "assume_role_wo_creds", - Expected: sharedConfig{ - AssumeRole: assumeRoleConfig{ - RoleARN: "assume_role_wo_creds_role_arn", - SourceProfile: "assume_role_wo_creds", - }, - }, - Err: SharedConfigAssumeRoleError{RoleARN: "assume_role_wo_creds_role_arn"}, - }, - { - Filenames: []string{filepath.Join("testdata", "shared_config_invalid_ini")}, - Profile: "profile_name", - Err: SharedConfigLoadError{Filename: filepath.Join("testdata", "shared_config_invalid_ini")}, - }, } for i, c := range cases { - cfg, err := loadSharedConfig(c.Profile, c.Filenames) - if c.Err != nil { - if e, a := c.Err.Error(), err.Error(); !strings.Contains(a, e) { - t.Errorf("%d, expect %v, to contain %v", i, e, a) + t.Run(strconv.Itoa(i)+"_"+c.Profile, func(t *testing.T) { + cfg, err := loadSharedConfig(c.Profile, c.Filenames, true) + if c.Err != nil { + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := c.Err.Error(), err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v, to be in %v", e, a) + } + return } - continue - } - if err != nil { - t.Errorf("%d, expect nil, %v", i, err) - } - if e, a := c.Expected, cfg; !reflect.DeepEqual(e, a) { - t.Errorf("%d, expect %v, got %v", i, e, a) - } + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + if e, a := c.Expected, cfg; !reflect.DeepEqual(e, a) { + t.Errorf("expect %v, got %v", e, a) + } + }) } } @@ -206,29 +145,6 @@ func TestLoadSharedConfigFromFile(t *testing.T) { Region: "full_profile_region", }, }, - { - Profile: "partial_assume_role", - Expected: sharedConfig{}, - }, - { - Profile: "assume_role", - Expected: sharedConfig{ - AssumeRole: assumeRoleConfig{ - RoleARN: 
"assume_role_role_arn", - SourceProfile: "complete_creds", - }, - }, - }, - { - Profile: "assume_role_w_mfa", - Expected: sharedConfig{ - AssumeRole: assumeRoleConfig{ - RoleARN: "assume_role_role_arn", - SourceProfile: "complete_creds", - MFASerial: "0123456789", - }, - }, - }, { Profile: "does_not_exists", Err: SharedConfigProfileNotExistsError{Profile: "does_not_exists"}, @@ -236,22 +152,27 @@ func TestLoadSharedConfigFromFile(t *testing.T) { } for i, c := range cases { - cfg := sharedConfig{} + t.Run(strconv.Itoa(i)+"_"+c.Profile, func(t *testing.T) { + cfg := sharedConfig{} - err := cfg.setFromIniFile(c.Profile, iniFile) - if c.Err != nil { - if e, a := c.Err.Error(), err.Error(); !strings.Contains(a, e) { - t.Errorf("%d, expect %v, to contain %v", i, e, a) + err := cfg.setFromIniFile(c.Profile, iniFile, true) + if c.Err != nil { + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := c.Err.Error(), err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v, to be in %v", e, a) + } + return } - continue - } - if err != nil { - t.Errorf("%d, expect nil, %v", i, err) - } - if e, a := c.Expected, cfg; e != a { - t.Errorf("%d, expect %v, got %v", i, e, a) - } + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := c.Expected, cfg; !reflect.DeepEqual(e, a) { + t.Errorf("expect %v, got %v", e, a) + } + }) } } diff --git a/aws/session/shared_test.go b/aws/session/shared_test.go new file mode 100644 index 00000000..ea18c781 --- /dev/null +++ b/aws/session/shared_test.go @@ -0,0 +1,15 @@ +package session + +import ( + "os" + + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" +) + +func initSessionTestEnv() (oldEnv func()) { + oldEnv = sdktesting.StashEnv() + os.Setenv("AWS_CONFIG_FILE", "file_not_exists") + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "file_not_exists") + + return oldEnv +} diff --git a/aws/session/testdata/credential_source_config b/aws/session/testdata/credential_source_config index 58b66614..f3fcfe8a 100644 --- 
a/aws/session/testdata/credential_source_config +++ b/aws/session/testdata/credential_source_config @@ -1,16 +1,33 @@ [env_var_credential_source] -role_arn = arn +role_arn = assume_role_w_creds_role_arn_env credential_source = Environment [invalid_source_and_credential_source] -role_arn = arn +role_arn = assume_role_w_creds_role_arn_bad credential_source = Environment source_profile = env_var_credential_source [ec2metadata] -role_arn = assume_role_w_creds_role_arn +role_arn = assume_role_w_creds_role_arn_ec2 credential_source = Ec2InstanceMetadata [ecscontainer] -role_arn = assume_role_w_creds_role_arn +role_arn = assume_role_w_creds_role_arn_ecs credential_source = EcsContainer + +[chained_assume_role] +role_arn = assume_role_w_creds_role_arn_chain +source_profile = ec2metadata + +[cred_proc_no_arn_set] +credential_process = cat ./testdata/test_json.json + +[cred_proc_arn_set] +role_arn = assume_role_w_creds_proc_role_arn +credential_process = cat ./testdata/test_json.json + +[chained_cred_proc] +role_arn = assume_role_w_creds_proc_source_prof +source_profile = cred_proc_no_arn_set + + diff --git a/aws/session/testdata/credential_source_config_for_windows b/aws/session/testdata/credential_source_config_for_windows new file mode 100644 index 00000000..34073afb --- /dev/null +++ b/aws/session/testdata/credential_source_config_for_windows @@ -0,0 +1,10 @@ +[cred_proc_no_arn_set] +credential_process = type .\testdata\test_json.json + +[cred_proc_arn_set] +role_arn = assume_role_w_creds_proc_role_arn +credential_process = type .\testdata\test_json.json + +[chained_cred_proc] +role_arn = assume_role_w_creds_proc_source_prof +source_profile = cred_proc_no_arn_set \ No newline at end of file diff --git a/aws/session/testdata/shared_config b/aws/session/testdata/shared_config index fe816fe2..7d645d0c 100644 --- a/aws/session/testdata/shared_config +++ b/aws/session/testdata/shared_config @@ -63,3 +63,19 @@ aws_secret_access_key = assume_role_w_creds_secret 
[assume_role_wo_creds] role_arn = assume_role_wo_creds_role_arn source_profile = assume_role_wo_creds + +[assume_role_with_credential_source] +role_arn = assume_role_with_credential_source_role_arn +credential_source = Ec2InstanceMetadata + +[multiple_assume_role] +role_arn = multiple_assume_role_role_arn +source_profile = assume_role + +[multiple_assume_role_with_credential_source] +role_arn = multiple_assume_role_with_credential_source_role_arn +source_profile = assume_role_with_credential_source + +[multiple_assume_role_with_credential_source2] +role_arn = multiple_assume_role_with_credential_source2_role_arn +source_profile = multiple_assume_role_with_credential_source diff --git a/aws/session/testdata/test_json.json b/aws/session/testdata/test_json.json new file mode 100644 index 00000000..2047224b --- /dev/null +++ b/aws/session/testdata/test_json.json @@ -0,0 +1,5 @@ +{ + "Version": 1, + "AccessKeyId": "cred_proc_akid", + "SecretAccessKey": "cred_proc_secret" +} diff --git a/aws/signer/v4/v4.go b/aws/signer/v4/v4.go index 70967c38..2071124b 100644 --- a/aws/signer/v4/v4.go +++ b/aws/signer/v4/v4.go @@ -98,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": 
struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if includeSHA256Header { @@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte { return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) []byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. 
@@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { io.CopyN(hash, reader, size) } - return hash.Sum(nil) + return hash.Sum(nil), nil } const doubleSpace = " " diff --git a/aws/signer/v4/v4_test.go b/aws/signer/v4/v4_test.go index 0dac1ac3..7e54cfb5 100644 --- a/aws/signer/v4/v4_test.go +++ b/aws/signer/v4/v4_test.go @@ -566,6 +566,9 @@ func TestSignWithRequestBody(t *testing.T) { })) req, err := http.NewRequest("POST", server.URL, nil) + if err != nil { + t.Errorf("expect not no error, got %v", err) + } _, err = signer.Sign(req, bytes.NewReader(expectBody), "service", "region", time.Now()) if err != nil { @@ -600,6 +603,9 @@ func TestSignWithRequestBody_Overwrite(t *testing.T) { })) req, err := http.NewRequest("GET", server.URL, strings.NewReader("invalid body")) + if err != nil { + t.Errorf("expect not no error, got %v", err) + } _, err = signer.Sign(req, nil, "service", "region", time.Now()) req.ContentLength = 0 diff --git a/aws/types.go b/aws/types.go index c5d81d12..00491815 100644 --- a/aws/types.go +++ b/aws/types.go @@ -7,13 +7,18 @@ import ( "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. 
+// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { diff --git a/aws/version.go b/aws/version.go index e10f5d2a..796a5e04 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "ibm-cos-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.1.1.dev1" +const SDKVersion = "1.2.0" diff --git a/awstesting/util.go b/awstesting/util.go index 0939b150..4462bae9 100644 --- a/awstesting/util.go +++ b/awstesting/util.go @@ -97,16 +97,23 @@ func (c *FakeContext) Value(key interface{}) interface{} { // StashEnv stashes the current environment variables and returns an array of // all environment values as key=val strings. +// +// Deprecated: StashEnv exists for backward compatibility and may be removed from the future iterations. +// It is not `internal` so that if you really need to use its functionality, and understand breaking +// changes will be made, you are able to. func StashEnv() []string { env := os.Environ() os.Clearenv() - return env } // PopEnv takes the list of the environment values and injects them into the // process's environment variable data. Clears any existing environment values // that may already exist. +// +// Deprecated: PopEnv exists for backward compatibility and may be removed from the future iterations. 
+// It is not `internal` so that if you really need to use its functionality, and understand breaking +// changes will be made, you are able to. func PopEnv(env []string) { os.Clearenv() diff --git a/example/service/s3/loggingUploadObjectReadBehavior/README.md b/example/service/s3/loggingUploadObjectReadBehavior/README.md new file mode 100644 index 00000000..0e1f86ec --- /dev/null +++ b/example/service/s3/loggingUploadObjectReadBehavior/README.md @@ -0,0 +1,14 @@ +# Example + +This example shows how you could wrap the reader of an file being +uploaded to Amazon S3 with a logger that will log the usage of the +reader, and print call stacks when the reader's Read, Seek, or ReadAt +methods encounter an error. + +# Usage + +This bucket uses the bucket name, key, and local file name passed to upload the local file to S3 as the key into the bucket. + +```sh +AWS_REGION=us-west-2 AWS_PROFILE=default go run . "mybucket" "10MB.file" ./10MB.file +``` \ No newline at end of file diff --git a/example/service/s3/loggingUploadObjectReadBehavior/main.go b/example/service/s3/loggingUploadObjectReadBehavior/main.go new file mode 100644 index 00000000..455407eb --- /dev/null +++ b/example/service/s3/loggingUploadObjectReadBehavior/main.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "io" + "log" + "os" + "runtime/debug" + + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/aws/session" + "github.com/IBM/ibm-cos-sdk-go/service/s3/s3manager" +) + +// Usage: +// go run -tags example +// +// Example: +// AWS_REGION=us-west-2 AWS_PROFILE=default go run . 
"mybucket" "10MB.file" ./10MB.file +func main() { + sess, err := session.NewSession() + if err != nil { + log.Fatalf("failed to load session, %v", err) + } + + uploader := s3manager.NewUploader(sess) + + file, err := os.Open(os.Args[3]) + if err != nil { + log.Fatalf("failed to open file, %v", err) + } + defer file.Close() + + // Wrap the readSeeker with a logger that will log usage, and stack traces + // on errors. + readLogger := NewReadLogger(file, sess.Config.Logger) + + // Upload with read logger + resp, err := uploader.Upload(&s3manager.UploadInput{ + Bucket: &os.Args[1], + Key: &os.Args[2], + Body: readLogger, + }, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.RequestOptions = append(u.RequestOptions, func(r *request.Request) { + }) + }) + + fmt.Println(resp, err) +} + +// Logger is a logger use for logging the readers usage. +type Logger interface { + Log(args ...interface{}) +} + +// ReadSeeker interface provides the interface for a Reader, Seeker, and ReadAt. +type ReadSeeker interface { + io.ReadSeeker + io.ReaderAt +} + +// ReadLogger wraps an reader with logging for access. +type ReadLogger struct { + reader ReadSeeker + logger Logger +} + +// NewReadLogger a ReadLogger that wraps the passed in ReadSeeker (Reader, +// Seeker, ReadAt) with a logger. +func NewReadLogger(r ReadSeeker, logger Logger) *ReadLogger { + return &ReadLogger{ + reader: r, + logger: logger, + } +} + +// Seek offsets the reader's current position for the next read. +func (s *ReadLogger) Seek(offset int64, mode int) (int64, error) { + newOffset, err := s.reader.Seek(offset, mode) + msg := fmt.Sprintf( + "ReadLogger.Seek(offset:%d, mode:%d) (newOffset:%d, err:%v)", + offset, mode, newOffset, err) + if err != nil { + msg += fmt.Sprintf("\n\tStack:\n%s", string(debug.Stack())) + } + + s.logger.Log(msg) + return newOffset, err +} + +// Read attempts to read from the reader, returning the bytes read, or error. 
+func (s *ReadLogger) Read(b []byte) (int, error) { + n, err := s.reader.Read(b) + msg := fmt.Sprintf( + "ReadLogger.Read(len(bytes):%d) (read:%d, err:%v)", + len(b), n, err) + if err != nil { + msg += fmt.Sprintf("\n\tStack:\n%s", string(debug.Stack())) + } + + s.logger.Log(msg) + return n, err +} + +// ReadAt will read the underlying reader starting at the offset. +func (s *ReadLogger) ReadAt(b []byte, offset int64) (int, error) { + n, err := s.reader.ReadAt(b, offset) + msg := fmt.Sprintf( + "ReadLogger.ReadAt(len(bytes):%d, offset:%d) (read:%d, err:%v)", + len(b), offset, n, err) + if err != nil { + msg += fmt.Sprintf("\n\tStack:\n%s", string(debug.Stack())) + } + + s.logger.Log(msg) + return n, err +} diff --git a/internal/sdktesting/env.go b/internal/sdktesting/env.go new file mode 100644 index 00000000..78d1f3af --- /dev/null +++ b/internal/sdktesting/env.go @@ -0,0 +1,53 @@ +package sdktesting + +import ( + "os" + "runtime" + "strings" +) + +// StashEnv stashes the current environment variables except variables listed in envToKeepx +// Returns an function to pop out old environment +func StashEnv(envToKeep ...string) func() { + if runtime.GOOS == "windows" { + envToKeep = append(envToKeep, "ComSpec") + envToKeep = append(envToKeep, "SYSTEM32") + envToKeep = append(envToKeep, "SYSTEMROOT") + } + envToKeep = append(envToKeep, "PATH") + extraEnv := getEnvs(envToKeep) + originalEnv := os.Environ() + os.Clearenv() // clear env + for key, val := range extraEnv { + os.Setenv(key, val) + } + return func() { + popEnv(originalEnv) + } +} + +func getEnvs(envs []string) map[string]string { + extraEnvs := make(map[string]string) + for _, env := range envs { + if val, ok := os.LookupEnv(env); ok && len(val) > 0 { + extraEnvs[env] = val + } + } + return extraEnvs +} + +// PopEnv takes the list of the environment values and injects them into the +// process's environment variable data. Clears any existing environment values +// that may already exist. 
+func popEnv(env []string) { + os.Clearenv() + + for _, e := range env { + p := strings.SplitN(e, "=", 2) + k, v := p[0], "" + if len(p) > 1 { + v = p[1] + } + os.Setenv(k, v) + } +} diff --git a/internal/shareddefaults/shared_config_other_test.go b/internal/shareddefaults/shared_config_other_test.go index 2e17a305..d31282d4 100644 --- a/internal/shareddefaults/shared_config_other_test.go +++ b/internal/shareddefaults/shared_config_other_test.go @@ -7,13 +7,13 @@ import ( "path/filepath" "testing" - "github.com/IBM/ibm-cos-sdk-go/awstesting" + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults" ) func TestSharedCredsFilename(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("HOME", "home_dir") os.Setenv("USERPROFILE", "profile_dir") @@ -27,8 +27,8 @@ func TestSharedCredsFilename(t *testing.T) { } func TestSharedConfigFilename(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("HOME", "home_dir") os.Setenv("USERPROFILE", "profile_dir") diff --git a/internal/shareddefaults/shared_config_windows_test.go b/internal/shareddefaults/shared_config_windows_test.go index d98da929..9cd02c64 100644 --- a/internal/shareddefaults/shared_config_windows_test.go +++ b/internal/shareddefaults/shared_config_windows_test.go @@ -7,13 +7,13 @@ import ( "path/filepath" "testing" - "github.com/IBM/ibm-cos-sdk-go/awstesting" + "github.com/IBM/ibm-cos-sdk-go/internal/sdktesting" "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults" ) func TestSharedCredsFilename(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("HOME", "home_dir") os.Setenv("USERPROFILE", "profile_dir") @@ -27,8 +27,8 @@ func TestSharedCredsFilename(t *testing.T) { } 
func TestSharedConfigFilename(t *testing.T) { - env := awstesting.StashEnv() - defer awstesting.PopEnv(env) + restoreEnvFn := sdktesting.StashEnv() + defer restoreEnvFn() os.Setenv("HOME", "home_dir") os.Setenv("USERPROFILE", "profile_dir") diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index a26585fe..c986ec00 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -27,6 +27,14 @@ ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html" }, + "AddLegalHold":{ + "name":"AddLegalHold", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?legalHold" + }, + "input":{"shape":"AddLegalHoldRequest"} + }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", "http":{ @@ -97,6 +105,23 @@ "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html" }, + "DeleteBucketLifecycle":{ + "name":"DeleteBucketLifecycle", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"DeleteBucketLifecycleRequest"}, + "documentationUrl":"https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-compatibility-api-bucket-operations#delete-the-lifecycle-configuration-for-a-bucket" + }, + "DeleteLegalHold":{ + "name":"DeleteLegalHold", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?legalHold" + }, + "input":{"shape":"DeleteLegalHoldRequest"} + }, "DeleteObject":{ "name":"DeleteObject", "http":{ @@ -119,6 +144,14 @@ "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", "alias":"DeleteMultipleObjects" }, + "ExtendObjectRetention":{ + "name":"ExtendObjectRetention", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?extendRetention" + }, + "input":{"shape":"ExtendObjectRetentionRequest"} + }, "GetBucketAcl":{ "name":"GetBucketAcl", "http":{ 
@@ -139,6 +172,16 @@ "output":{"shape":"GetBucketCorsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html" }, + "GetBucketLifecycleConfiguration":{ + "name":"GetBucketLifecycleConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, + "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, + "documentationUrl":"https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-compatibility-api-bucket-operations#retrieve-a-bucket-lifecycle-configuration" + }, "GetBucketLocation":{ "name":"GetBucketLocation", "http":{ @@ -159,6 +202,16 @@ "output":{"shape":"GetBucketLoggingOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html" }, + "GetBucketProtectionConfiguration":{ + "name":"GetBucketProtectionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?protection" + }, + "input":{"shape":"GetBucketProtectionConfigurationRequest"}, + "output":{"shape":"GetBucketProtectionConfigurationOutput"} + }, + "GetObject":{ "name":"GetObject", "http":{ @@ -233,6 +286,15 @@ "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", "alias":"GetServiceExtended" }, + "ListLegalHolds":{ + "name":"ListLegalHolds", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?legalHold" + }, + "input":{"shape":"ListLegalHoldsRequest"}, + "output":{"shape":"ListLegalHoldsOutput"} + }, "ListMultipartUploads":{ "name":"ListMultipartUploads", "http":{ @@ -285,6 +347,15 @@ "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html" }, + "PutBucketLifecycleConfiguration":{ + "name":"PutBucketLifecycleConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?lifecycle" + }, + 
"input":{"shape":"PutBucketLifecycleConfigurationRequest"}, + "documentationUrl":"https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-compatibility-api-bucket-operations#compatibility-api-create-bucket-lifecycle" + }, "PutBucketLogging":{ "name":"PutBucketLogging", "http":{ @@ -294,6 +365,14 @@ "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html" }, + "PutBucketProtectionConfiguration":{ + "name":"PutBucketProtectionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?protection" + }, + "input":{"shape":"PutBucketProtectionConfigurationRequest"} + }, "PutObject":{ "name":"PutObject", "http":{ @@ -317,6 +396,19 @@ ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html" }, + "RestoreObject":{ + "name":"RestoreObject", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?restore" + }, + "input":{"shape":"RestoreObjectRequest"}, + "errors":[ + {"shape":"ObjectAlreadyInActiveTierError"} + ], + "__documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", + "alias":"PostObjectRestore" + }, "UploadPart":{ "name":"UploadPart", "http":{ @@ -412,6 +504,32 @@ } }, "AccountId":{"type":"string"}, + "AdditionalRetentionPeriod":{"type":"integer"}, + "AddLegalHoldRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "RetentionLegalHoldId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RetentionLegalHoldId":{ + "shape":"RetentionLegalHoldId", + "location":"querystring", + "locationName":"add" + } + } + }, "AllowQuotedRecordDelimiter":{"type":"boolean"}, "AllowedHeader":{"type":"string"}, "AllowedHeaders":{ @@ -574,6 +692,16 @@ "Suspended" ] }, + "BucketProtectionDefaultRetention":{ + 
"type":"structure", + "required":["Days"], + "members":{ + "Days":{ + "shape":"Days" + } + } + }, + "BucketProtectionEnablePermanentRetention":{"type":"boolean"}, "Buckets":{ "type":"list", "member":{ @@ -581,6 +709,30 @@ "locationName":"Bucket" } }, + "BucketProtectionMaximumRetention":{ + "type":"structure", + "required":["Days"], + "members":{ + "Days":{ + "shape":"Days" + } + } + }, + "BucketProtectionMinimumRetention":{ + "type":"structure", + "required":["Days"], + "members":{ + "Days":{ + "shape":"Days" + } + } + }, + "BucketProtectionStatus":{ + "type":"string", + "enum":[ + "Retention" + ] + }, "BucketsExtended":{ "type":"list", "member":{ @@ -752,6 +904,21 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "RetentionExpirationDate":{ + "shape":"RetentionExpirationDate", + "location":"header", + "locationName":"Retention-Expiration-Date" + }, + "RetentionLegalHoldId":{ + "shape":"RetentionLegalHoldId", + "location":"header", + "locationName":"Retention-Legal-Hold-ID" + }, + "RetentionPeriod":{ + "shape":"RetentionPeriod", + "location":"header", + "locationName":"Retention-Period" } }, "payload":"MultipartUpload" @@ -961,6 +1128,26 @@ "location":"header", "locationName":"x-amz-metadata-directive" }, + "RetentionDirective":{ + "shape":"RetentionDirective", + "location":"header", + "locationName":"Retention-Directive" + }, + "RetentionExpirationDate":{ + "shape":"RetentionExpirationDate", + "location":"header", + "locationName":"Retention-Expiration-Date" + }, + "RetentionLegalHoldId":{ + "shape":"RetentionLegalHoldId", + "location":"header", + "locationName":"Retention-Legal-Hold-ID" + }, + "RetentionPeriod":{ + "shape":"RetentionPeriod", + "location":"header", + "locationName":"Retention-Period" + }, "TaggingDirective":{ "shape":"TaggingDirective", "location":"header", @@ -1335,6 +1522,17 @@ } } }, + "DeleteBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + 
"shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, "DeleteBucketRequest":{ "type":"structure", "required":["Bucket"], @@ -1346,6 +1544,31 @@ } } }, + "DeleteLegalHoldRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "RetentionLegalHoldId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RetentionLegalHoldId":{ + "shape":"RetentionLegalHoldId", + "location":"querystring", + "locationName":"remove" + } + } + }, "DeleteMarker":{"type":"boolean"}, "DeleteMarkerEntry":{ "type":"structure", @@ -1592,6 +1815,48 @@ "type":"string", "enum":["SQL"] }, + "ExtendObjectRetentionRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "AdditionalRetentionPeriod":{ + "shape":"AdditionalRetentionPeriod", + "location":"header", + "locationName":"Additional-Retention-Period" + }, + "ExtendRetentionFromCurrentTime":{ + "shape":"ExtendRetentionFromCurrentTime", + "location":"header", + "locationName":"Extend-Retention-From-Current-Time" + }, + "NewRetentionExpirationDate":{ + "shape":"NewRetentionExpirationDate", + "location":"header", + "locationName":"New-Retention-Expiration-Date" + }, + "NewRetentionPeriod":{ + "shape":"NewRetentionPeriod", + "location":"header", + "locationName":"New-Retention-Period" + } + } + }, + "ExtendRetentionFromCurrentTime":{ + "type":"integer" + }, "FetchOwner":{"type":"boolean"}, "FieldDelimiter":{"type":"string"}, "FileHeaderInfo":{ @@ -1663,6 +1928,26 @@ } } }, + "GetBucketLifecycleConfigurationOutput":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "locationName":"Rule" + } + } + }, + "GetBucketLifecycleConfigurationRequest":{ + 
"type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, "GetBucketLocationOutput":{ "type":"structure", "members":{ @@ -1697,6 +1982,24 @@ } } }, + "GetBucketProtectionConfigurationOutput":{ + "type":"structure", + "members":{ + "ProtectionConfiguration":{"shape":"ProtectionConfiguration"} + }, + "payload":"ProtectionConfiguration" + }, + "GetBucketProtectionConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, "GetObjectAclOutput":{ "type":"structure", "members":{ @@ -1843,6 +2146,21 @@ "location":"headers", "locationName":"x-amz-meta-" }, + "RetentionExpirationDate":{ + "shape":"RetentionExpirationDate", + "location":"header", + "locationName":"Retention-Expiration-Date" + }, + "RetentionLegalHoldCount":{ + "shape":"RetentionLegalHoldCount", + "location":"header", + "locationName":"Retention-Legal-Hold-Count" + }, + "RetentionPeriod":{ + "shape":"RetentionPeriod", + "location":"header", + "locationName":"Retention-Period" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "location":"header", @@ -1882,6 +2200,16 @@ "shape":"TagCount", "location":"header", "locationName":"x-amz-tagging-count" + }, + "IBMTransition":{ + "shape":"IBMTransition", + "location":"header", + "locationName":"x-ibm-transition" + }, + "IBMRestoredCopyStorageClass":{ + "shape":"IBMRestoredCopyStorageClass", + "location":"header", + "locationName":"x-ibm-restored-copy-storage-class" } }, "payload":"Body" @@ -2154,6 +2482,21 @@ "location":"headers", "locationName":"x-amz-meta-" }, + "RetentionExpirationDate":{ + "shape":"RetentionExpirationDate", + "location":"header", + "locationName":"Retention-Expiration-Date" + }, + "RetentionLegalHoldCount":{ + "shape":"RetentionLegalHoldCount", + "location":"header", + "locationName":"Retention-Legal-Hold-Count" + }, + 
"RetentionPeriod":{ + "shape":"RetentionPeriod", + "location":"header", + "locationName":"Retention-Period" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "location":"header", @@ -2188,6 +2531,16 @@ "shape":"PartsCount", "location":"header", "locationName":"x-amz-mp-parts-count" + }, + "IBMTransition":{ + "shape":"IBMTransition", + "location":"header", + "locationName":"x-ibm-transition" + }, + "IBMRestoredCopyStorageClass":{ + "shape":"IBMRestoredCopyStorageClass", + "location":"header", + "locationName":"x-ibm-restored-copy-storage-class" } } }, @@ -2268,10 +2621,12 @@ "HostName":{"type":"string"}, "HttpErrorCodeReturnedEquals":{"type":"string"}, "HttpRedirectCode":{"type":"string"}, + "IBMRestoredCopyStorageClass":{"type":"string"}, "IBMServiceInstanceId":{"type":"string"}, "IBMSSEKPCustomerRootKeyCrn":{"type":"string"}, "IBMSSEKPEncryptionAlgorithm":{"type":"string"}, "IBMSSEKPEnabled":{"type": "boolean"}, + "IBMTransition":{"type":"string"}, "ID":{"type":"string"}, "IfMatch":{"type":"string"}, "IfModifiedSince":{"type":"timestamp"}, @@ -2466,12 +2821,28 @@ "flattened":true }, "LastModified":{"type":"timestamp"}, + "LegalHold":{ + "type":"structure", + "members":{ + "Date":{ + "shape":"Date" + }, + "ID":{ + "shape":"LegalHoldID" + } + } + }, + "LegalHoldID":{"type":"string"}, + "LegalHolds":{ + "type": "list", + "member":{"shape":"LegalHold"} + }, "LifecycleConfiguration":{ "type":"structure", "required":["Rules"], "members":{ "Rules":{ - "shape":"Rules", + "shape":"LifecycleRules", "locationName":"Rule" } } @@ -2480,32 +2851,24 @@ "type":"structure", "members":{ "Date":{"shape":"Date"}, - "Days":{"shape":"Days"}, - "ExpiredObjectDeleteMarker":{"shape":"ExpiredObjectDeleteMarker"} + "Days":{"shape":"Days"} } }, "LifecycleRule":{ "type":"structure", - "required":["Status"], + "required":[ + "Status", + "Filter" + ], "members":{ "Expiration":{"shape":"LifecycleExpiration"}, "ID":{"shape":"ID"}, - "Prefix":{ - "shape":"Prefix", - "deprecated":true - 
}, "Filter":{"shape":"LifecycleRuleFilter"}, "Status":{"shape":"ExpirationStatus"}, "Transitions":{ "shape":"TransitionList", "locationName":"Transition" - }, - "NoncurrentVersionTransitions":{ - "shape":"NoncurrentVersionTransitionList", - "locationName":"NoncurrentVersionTransition" - }, - "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, - "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} + } } }, "LifecycleRuleAndOperator":{ @@ -2522,9 +2885,7 @@ "LifecycleRuleFilter":{ "type":"structure", "members":{ - "Prefix":{"shape":"Prefix"}, - "Tag":{"shape":"Tag"}, - "And":{"shape":"LifecycleRuleAndOperator"} + "Prefix":{"shape":"Prefix"} } }, "LifecycleRules":{ @@ -2585,6 +2946,42 @@ } } }, + "ListLegalHoldsOutput":{ + "type":"structure", + "members":{ + "CreateTime":{ + "shape":"Date" + }, + "LegalHolds":{ + "shape":"LegalHolds" + }, + "RetentionPeriod":{ + "shape":"RetentionPeriod" + }, + "RetentionPeriodExpirationDate":{ + "shape":"RetentionPeriodExpirationDate" + } + } + }, + "ListLegalHoldsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + } + } + }, "ListMultipartUploadsOutput":{ "type":"structure", "members":{ @@ -2893,6 +3290,13 @@ "member":{"shape":"MultipartUpload"}, "flattened":true }, + "NewRetentionExpirationDate":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "NewRetentionPeriod":{ + "type":"integer" + }, "NextKeyMarker":{"type":"string"}, "NextMarker":{"type":"string"}, "NextPartNumberMarker":{"type":"integer"}, @@ -3146,6 +3550,32 @@ }, "event":true }, + "ProtectionConfiguration":{ + "type":"structure", + "required":[ + "Status", + "MinimumRetention", + "DefaultRetention", + "MaximumRetention" + ], + "members":{ + "Status":{ + "shape":"BucketProtectionStatus" + }, + "MinimumRetention":{ + 
"shape":"BucketProtectionMinimumRetention" + }, + "DefaultRetention":{ + "shape":"BucketProtectionDefaultRetention" + }, + "MaximumRetention":{ + "shape":"BucketProtectionMaximumRetention" + }, + "EnablePermanentRetention":{ + "shape":"BucketProtectionEnablePermanentRetention" + } + } + }, "Protocol":{ "type":"string", "enum":[ @@ -3256,6 +3686,26 @@ }, "payload":"CORSConfiguration" }, + "PutBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "LifecycleConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "LifecycleConfiguration":{ + "shape":"LifecycleConfiguration", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"LifecycleConfiguration" + }, "PutBucketLoggingRequest":{ "type":"structure", "required":[ @@ -3281,6 +3731,25 @@ }, "payload":"BucketLoggingStatus" }, + "PutBucketProtectionConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "ProtectionConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ProtectionConfiguration":{ + "shape":"ProtectionConfiguration", + "locationName":"ProtectionConfiguration" + } + }, + "payload":"ProtectionConfiguration" + }, "PutObjectAclOutput":{ "type":"structure", "members":{ @@ -3497,6 +3966,21 @@ "location":"headers", "locationName":"x-amz-meta-" }, + "RetentionExpirationDate":{ + "shape":"RetentionExpirationDate", + "location":"header", + "locationName":"Retention-Expiration-Date" + }, + "RetentionLegalHoldId":{ + "shape":"RetentionLegalHoldId", + "location":"header", + "locationName":"Retention-Legal-Hold-ID" + }, + "RetentionPeriod":{ + "shape":"RetentionPeriod", + "location":"header", + "locationName":"Retention-Period" + }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", "location":"header", @@ -3730,6 +4214,65 @@ 
"ResponseContentType":{"type":"string"}, "ResponseExpires":{"type":"timestamp"}, "Restore":{"type":"string"}, + "RestoreObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RestoreRequest":{ + "shape":"RestoreRequest", + "locationName":"RestoreRequest", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"RestoreRequest" + }, + "RestoreRequest":{ + "type":"structure", + "required":["Days"], + "members":{ + "Days":{ + "shape":"Days" + }, + "GlacierJobParameters":{ + "shape":"GlacierJobParameters" + } + } + }, + "RetentionDirective":{ + "type":"string", + "enum":[ + "COPY", + "REPLACE" + ] + }, + "RetentionExpirationDate":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "RetentionPeriodExpirationDate":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "RetentionLegalHoldCount": { + "type": "integer" + }, + "RetentionLegalHoldId":{"type":"string"}, + "RetentionPeriod":{ + "type":"integer" + }, "Role":{"type":"string"}, "RoutingRule":{ "type":"structure", @@ -4007,9 +4550,7 @@ "Tier":{ "type":"string", "enum":[ - "Standard", - "Bulk", - "Expedited" + "Bulk" ] }, "Token":{"type":"string"}, @@ -4069,11 +4610,7 @@ "TransitionStorageClass":{ "type":"string", "enum":[ - "GLACIER", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "DEEP_ARCHIVE" + "GLACIER" ] }, "Type":{ @@ -4342,4 +4879,4 @@ }, "WebsiteRedirectLocation":{"type":"string"} } -} \ No newline at end of file +} diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 18031d24..dc864875 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -3,29 +3,40 @@ "service": "

", "operations": { "AbortMultipartUpload": "

Aborts a multipart upload.

To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.

", + "AddLegalHold": "

Add a legal hold on an object. The legal hold identifiers are stored in the object metadata along with the timestamp of when they are POSTed to the object. The presence of any legal hold identifiers prevents the modification or deletion of the object data, even if the retention period has expired. Legal Holds can only be added to objects in a bucket with a protection policy. Otherwise a 400 error will be returned.

", "CompleteMultipartUpload": "

Completes a multipart upload by assembling previously uploaded parts.

", "CopyObject": "

Creates a copy of an object that is already stored in Amazon S3.

", "CreateBucket": "

Creates a new bucket.

", "CreateMultipartUpload": "

Initiates a multipart upload and returns an upload ID.

Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

", + "CompleteMultipartUpload":"

Completes a multipart upload by assembling previously uploaded parts.

", "DeleteBucket": "

Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.

", "DeleteBucketCors": "

Deletes the cors configuration information set for the bucket.

", + "DeleteBucketLifecycle": "

Deletes the lifecycle configuration from the bucket.

", + "DeleteLegalHold": "

Remove a legal hold on an object. The legal hold identifiers are stored in the object metadata along with the timestamp of when they are POSTed to the object. The presence of any legal hold identifiers prevents the modification or deletion of the object data, even if the retention period has expired.

", "DeleteObject": "

Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

", "DeleteObjects": "

This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.

", + "ExtendObjectRetention": "

This implementation of the POST operation uses the extendRetention sub-resource to extend the retention period of a protected object in a protected vault.

", "GetBucketAcl": "

Gets the access control policy for the bucket.

", "GetBucketCors": "

Returns the cors configuration for the bucket.

", + "GetBucketLifecycleConfiguration": "

Returns the lifecycle configuration for the bucket.

", "GetBucketLocation": "

Returns the region the bucket resides in.

", + "GetBucketProtectionConfiguration": "

Returns the protection configuration of a bucket. The EnablePermanentRetention flag will only be returned if the flag is set to true for a bucket.

", "GetObject": "

Retrieves objects from Amazon S3.

", "GetObjectAcl": "

Returns the access control list (ACL) of an object.

", "HeadBucket": "

This operation is useful to determine if a bucket exists and you have permission to access it.

", "HeadObject": "

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

", "ListBuckets": "

Returns a list of all buckets owned by the authenticated sender of the request.

", + "ListLegalHolds": "

Returns a list of legal holds on an object.

", "ListMultipartUploads": "

This operation lists in-progress multipart uploads.

", "ListObjects": "

Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.

", "ListParts": "

Lists the parts that have been uploaded for a specific multipart upload.

", "PutBucketAcl": "

Sets the permissions on a bucket using access control lists (ACL).

", "PutBucketCors": "

Sets the cors configuration for a bucket.

", + "PutBucketProtectionConfiguration": "

Sets the protection configuration of an existing bucket. EnablePermanentRetention is optional and if not included is considered to be false. Once set to true, must be included in any subsequent PUT Bucket?protection requests for that bucket.

", + "PutBucketLifecycleConfiguration": "

Sets the lifecycle configuration for a bucket.

", "PutObject": "

Adds an object to a bucket.

", "PutObjectAcl": "

uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket

", + "RestoreObject": "

Restores an archived copy of an object back into Amazon S3.

", "UploadPart": "

Uploads a part in a multipart upload.

Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

", "UploadPartCopy": "

Uploads a part by copying data from an existing object as data source.

" }, @@ -95,6 +106,12 @@ "InventoryS3BucketDestination$AccountId": "

The ID of the account that owns the destination bucket.

" } }, + "AdditionalRetentionPeriod": { + "base": null, + "refs": { + "ExtendObjectRetentionRequest$AdditionalRetentionPeriod": "

Additional time, in seconds, to add to the existing retention period for the object. If this field and New-Retention-Period and/or New-Retention-Expiration-Date are specified, a 400 error will be returned. If none of the Request Headers are specified, a 400 error will be returned to the user. The retention period of an object may be extended up to bucket maximum retention period from the time of the request.

" + } + }, "AllowQuotedRecordDelimiter": { "base": null, "refs": { @@ -295,6 +312,7 @@ "GetBucketLifecycleRequest$Bucket": "

", "GetBucketLocationRequest$Bucket": "

", "GetBucketLoggingRequest$Bucket": "

", + "GetBucketProtectionConfigurationRequest$Bucket": "

", "GetBucketMetricsConfigurationRequest$Bucket": "

The name of the bucket containing the metrics configuration to retrieve.

", "GetBucketNotificationConfigurationRequest$Bucket": "

Name of the bucket to get the notification configuration for.

", "GetBucketPolicyRequest$Bucket": "

", @@ -340,6 +358,7 @@ "PutBucketMetricsConfigurationRequest$Bucket": "

The name of the bucket for which the metrics configuration is set.

", "PutBucketNotificationConfigurationRequest$Bucket": "

", "PutBucketNotificationRequest$Bucket": "

", + "PutBucketProtectionConfigurationRequest$Bucket": "

", "PutBucketPolicyRequest$Bucket": "

", "PutBucketReplicationRequest$Bucket": "

", "PutBucketRequestPaymentRequest$Bucket": "

", @@ -360,6 +379,36 @@ "UploadPartRequest$Bucket": "

Name of the bucket to which the multipart upload was initiated.

" } }, + "BucketProtectionStatus": { + "base": null, + "refs": { + "ProtectionConfiguration$Status": "

Retention status of a bucket.

" + } + }, + "BucketProtectionMinimumRetention": { + "base": null, + "refs": { + "ProtectionConfiguration$MinimumRetention":"

Minimum retention period for an object, if a PUT of an object specifies a shorter retention period the PUT object will fail.

" + } + }, + "BucketProtectionDefaultRetention": { + "base": null, + "refs": { + "ProtectionConfiguration$DefaultRetention":"

Default retention period for an object, if a PUT of an object does not specify a retention period this value will be converted to seconds and used.

" + } + }, + "BucketProtectionMaximumRetention": { + "base": null, + "refs": { + "ProtectionConfiguration$MaximumRetention": "

Maximum retention period for an object, if a PUT of an object specifies a longer retention period the PUT object will fail.

" + } + }, + "BucketProtectionEnablePermanentRetention": { + "base": null, + "refs": { + "ProtectionConfiguration$EnablePermanentRetention":"

Enable permanent retention for an object.

" + } + }, "BucketVersioningStatus": { "base": null, "refs": { @@ -1113,6 +1162,12 @@ "SelectParameters$ExpressionType": "

The type of the provided expression (e.g., SQL).

" } }, + "ExtendRetentionFromCurrentTime": { + "base": "

Retention Period in seconds for the object. The Retention will be enforced from the current time until current time + the value in this header. This value has to be within the ranges defined for the bucket.

", + "refs": { + "ExtendObjectRetentionRequest$ExtendRetentionFromCurrentTime": "

Retention Period in seconds. The Retention will be enforced from the object creation time until current time + the value in this header. This value has to be within the ranges defined for the bucket. If this field and Additional-Retention-Period and/or New-Retention-Period and/or New-Retention-Expiration-Date are specified, a 400 error will be returned. If none of the Request Headers are specified, a 400 error will be returned to the user. The retention period of an object may be extended up to bucket maximum retention period from the time of the request.

" + } + }, "FetchOwner": { "base": null, "refs": { @@ -1141,7 +1196,7 @@ "FilterRuleList": { "base": "

A list of containers for the key value pair that defines the criteria for the filter rule.

", "refs": { - "S3KeyFilter$FilterRules": null + "S3KeyFilter$FilterRules": "

" } }, "FilterRuleName": { @@ -1281,6 +1336,17 @@ "refs": { } }, + "GetBucketProtectionConfigurationOutput": { + "base": null, + "refs": { + + } + }, + "GetBucketProtectionConfigurationRequest": { + "base": null, + "refs": { + } + }, "GetBucketReplicationOutput": { "base": null, "refs": { @@ -1382,6 +1448,12 @@ "RestoreRequest$GlacierJobParameters": "

Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

" } }, + "GlacierJobParameters": { + "base": null, + "refs": { + "RestoreRequest$Days": "

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

" + } + }, "Grant": { "base": "

", "refs": { @@ -1518,6 +1590,20 @@ "CreateBucketRequest$IBMSSEKPEncryptionAlgorithm" : null } }, + "IBMRestoreCopyStorageClass":{ + "base": null, + "refs": { + "GetObjectOutput$IBMRestoreCopyStorageClass": "

This header is only included if an object has transition metadata. This header will indicate the transition storage class and time of transition. If this header and the x-amz-restore header are both included, this header will indicate the time at which the object was originally archived.

", + "HeadObjectOutput$IBMRestoreCopyStorageClass": "

This header is only included if an object has transition metadata. This header will indicate the transition storage class and time of transition. If this header and the x-amz-restore header are both included, this header will indicate the time at which the object was originally archived.

" + } + }, + "IBMTransition":{ + "base": null, + "refs": { + "GetObjectOutput$IBMTransition": "

This header is only included if an object has transition metadata. This header will indicate the transition storage class and time of transition. If this header and the x-amz-restore header are both included, this header will indicate the time at which the object was originally archived.

", + "HeadObjectOutput$IBMTransition": "

This header is only included if an object has transition metadata. This header will indicate the transition storage class and time of transition. If this header and the x-amz-restore header are both included, this header will indicate the time at which the object was originally archived.

" + } + }, "ID": { "base": null, "refs": { @@ -1802,7 +1888,7 @@ } }, "LifecycleRules": { - "base": null, + "base": "

Currently only one Rule is allowed.

", "refs": { "BucketLifecycleConfiguration$Rules": "

", "GetBucketLifecycleConfigurationOutput$Rules": "

" @@ -2084,6 +2170,18 @@ "ListMultipartUploadsOutput$Uploads": "

" } }, + "NewRetentionPeriod":{ + "base": null, + "refs": { + "ExtendObjectRetentionRequest$NewRetentionPeriod": "

Retention period, in seconds, to use for the object in place of the existing retention period stored for the object. If this value is less than the existing value stored for the object, a 400 error will be returned. If this field and Additional-Retention-Period and/or New-Retention-Expiration-Date are specified, a 400 error will be returned. If none of the Request Headers are specified, a 400 error will be returned.

" + } + }, + "NewRetentionExpirationDate": { + "base": null, + "refs": { + "ExtendObjectRetentionRequest$NewRetentionExpirationDate": "

A new retention date to use for the object in place of the existing retention date. If this value is less than the existing value stored for the object, a 400 error will be returned. If this field and Additional-Retention-Period and/or New-Retention-Period and/or Extend-Retention-From-Current-Time are specified, a 400 error will be returned. If none of the Request Headers are specified, a 400 error will be returned to the user. The retention period of an object may be extended up to bucket maximum retention period from the time of the request.

" + } + }, "NextKeyMarker": { "base": null, "refs": { @@ -2437,7 +2535,7 @@ "InventoryS3BucketDestination$Prefix": "

The prefix that is prepended to all inventory results.

", "LifecycleRule$Prefix": "

Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

", "LifecycleRuleAndOperator$Prefix": "

", - "LifecycleRuleFilter$Prefix": "

Prefix identifying one or more objects to which the rule applies.

", + "LifecycleRuleFilter$Prefix": "

Only an empty prefix is allowed.

", "ListMultipartUploadsOutput$Prefix": "

When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.

", "ListMultipartUploadsRequest$Prefix": "

Lists in-progress uploads only for those keys that begin with the specified prefix.

", "ListObjectVersionsOutput$Prefix": "

", @@ -2474,6 +2572,12 @@ "SelectObjectContentEventStream$Progress": "

The Progress Event.

" } }, + "ProtectionConfiguration":{ + "base": "

", + "refs": { + "GetBucketProtectionConfigurationOutput$ProtectionConfiguration": "

Bucket protection configuration

" + } + }, "Protocol": { "base": null, "refs": { @@ -2875,6 +2979,55 @@ "RestoreRequest$Type": "

Type of restore request.

" } }, + "RetentionDirective": { + "base": null, + "refs": { + "CopyObjectRequest$RetentionDirective": "

This header controls how the Protection state of the source object is copied to the destination object. If copied, the retention period and all legal holds are copied onto the new object. The legal hold dates are set to the date of the copy.

" + } + }, + "RetentionExpirationDate": { + "base": null, + "refs": { + "CompleteMultipartUploadRequest$RetentionExpirationDate":"

Date on which it will be legal to delete or modify the object. This field can only be specified if Retention-Directive is REPLACE. You can only specify this or the Retention-Period header. If both are specified a 400 error will be returned. If neither is specified the bucket's DefaultRetention period will be used.

", + "CopyObjectRequest$RetentionExpirationDate":"

Date on which it will be legal to delete or modify the object. This field can only be specified if Retention-Directive is REPLACE. You can only specify this or the Retention-Period header. If both are specified a 400 error will be returned. If neither is specified the bucket's DefaultRetention period will be used.

", + "GetObjectOutput$RetentionExpirationDate":"

Date on which it will be legal to delete or modify the object. You can only specify this or the Retention-Period header. If both are specified a 400 error will be returned. If neither is specified the bucket's DefaultRetention period will be used.

", + "HeadObjectOutput$RetentionExpirationDate":"

Date on which it will be legal to delete or modify the object. You can only specify this or the Retention-Period header. If both are specified a 400 error will be returned. If neither is specified the bucket's DefaultRetention period will be used.

", + "PutObjectRequest$RetentionExpirationDate":"

Date on which it will be legal to delete or modify the object. This field can only be specified if Retention-Directive is REPLACE. You can only specify this or the Retention-Period header. If both are specified a 400 error will be returned. If neither is specified the bucket's DefaultRetention period will be used.

" + } + }, + "RetentionPeriod": { + "base": "

Retention period to store on the object in seconds. The object can be neither overwritten nor deleted until the amount of time specified in the retention period has elapsed. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

", + "refs": { + "ListLegalHolds$RetentionPeriod": "

Retention period in seconds.

", + "HeadObjectOutput$RetentionPeriod":"

Retention period to store on the object in seconds. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

", + "GetObjectOutput$RetentionPeriod":"

Retention period to store on the object in seconds. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

", + "CompleteMultipartUploadRequest$RetentionPeriod":"

Retention period to store on the object in seconds. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

", + "CopyObjectOutput$RetentionPeriod":"

Retention period to store on the object in seconds. This field can only be specified if Retention-Directive is REPLACE. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

", + "PutObjectRequest$RetentionPeriod":"

Retention period to store on the object in seconds. If this field and Retention-Expiration-Date are specified a 400 error is returned. If neither is specified the bucket's DefaultRetention period will be used. 0 is a legal value assuming the bucket's minimum retention period is also 0.

" + } + }, + "RetentionPeriodExpirationDate": { + "base": null, + "refs": { + "ListLegalHolds$RetentionPeriodExpirationDate": "

Date on which the retention period will expire.

" + } + }, + "RetentionLegalholdCount": { + "base": null, + "refs": { + "HeadObjectOutput$RetentionLegalholdCount": "

Returns the count of legal holds on the object. If there are no legal holds, the header is not returned.

", + "GetObjectOutput$RetentionLegalholdCount": "

Returns the count of legal holds on the object. If there are no legal holds, the header is not returned.

" + } + }, + + "RetentionLegalHoldId": { + "base": null, + "refs": { + "CopyObjectRequest$RetentionLegalHoldId": "

A single legal hold to apply to the object. This field can only be specified if Retention-Directive is REPLACE. A legal hold is a character string of max length 64. The object cannot be overwritten or deleted until all legal holds associated with the object are removed.

", + "CompleteMultipartUploadRequest$RetentionLegalHoldId": "

A single legal hold to apply to the object. This field can only be specified if Retention-Directive is REPLACE. A legal hold is a character string of max length 64. The object cannot be overwritten or deleted until all legal holds associated with the object are removed.

", + "PutObjectRequest$RetentionLegalHoldId": "

A single legal hold to apply to the object. This field can only be specified if Retention-Directive is REPLACE. A legal hold is a character string of max length 64. The object cannot be overwritten or deleted until all legal holds associated with the object are removed.

" + } + }, "Role": { "base": null, "refs": { @@ -3280,7 +3433,7 @@ "TransitionList": { "base": null, "refs": { - "LifecycleRule$Transitions": "

" + "LifecycleRule$Transitions": "

Currently only one Transition is allowed; additionally, the Date and Days fields are mutually exclusive.

" } }, "TransitionStorageClass": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 23bfd6c5..d0daa4c8 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -8,7 +8,7 @@ "dnsSuffix" : "amazonaws.com", "partition" : "aws", "partitionName" : "AWS Standard", - "regionRegex" : "^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$", "regions" : { "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" @@ -46,6 +46,9 @@ "eu-west-3" : { "description" : "EU (Paris)" }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, @@ -82,6 +85,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -94,6 +98,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -185,6 +190,12 @@ }, "hostname" : "api.ecr.eu-west-3.amazonaws.com" }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "api.ecr.me-south-1.amazonaws.com" + }, "sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -222,6 +233,7 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } @@ -240,6 +252,7 @@ }, "api.sagemaker" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -247,8 +260,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { "credentialScope" : { @@ -293,6 +309,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { 
}, @@ -321,6 +338,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -328,6 +346,23 @@ "us-west-2" : { } } }, + "appmesh" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "appstream2" : { "defaults" : { "credentialScope" : { @@ -370,6 +405,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, @@ -394,6 +430,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -425,8 +462,25 @@ "us-west-2" : { } } }, + "backup" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "batch" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -438,6 +492,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -490,6 +545,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-southeast-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -523,6 +579,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -576,6 +633,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "us-east-1" : { }, 
"us-east-2" : { }, "us-west-1" : { }, @@ -610,6 +668,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -619,6 +678,7 @@ }, "codebuild" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -626,9 +686,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { @@ -669,6 +731,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -699,6 +762,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { @@ -830,7 +894,10 @@ }, "comprehendmedical" : { "endpoints" : { + "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -850,6 +917,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -875,11 +943,16 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -912,6 +985,30 @@ "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "datasync-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "datasync-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + 
"credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "datasync-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "datasync-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -951,6 +1048,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -977,6 +1075,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -998,6 +1097,12 @@ }, "hostname" : "rds.ap-northeast-2.amazonaws.com" }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "rds.ap-southeast-2.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -1010,6 +1115,12 @@ }, "hostname" : "rds.eu-west-1.amazonaws.com" }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "rds.eu-west-2.amazonaws.com" + }, "us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -1039,6 +1150,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "sa-east-1" : { }, @@ -1060,6 +1172,12 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "dynamodb-fips.ca-central-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -1072,11 +1190,36 @@ "hostname" : "localhost:8000", "protocols" : [ "http" ] }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "dynamodb-fips.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, 
+ "hostname" : "dynamodb-fips.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "dynamodb-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "dynamodb-fips.us-west-2.amazonaws.com" + } } }, "ec2" : { @@ -1096,6 +1239,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1117,6 +1261,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1144,6 +1289,7 @@ }, "hostname" : "elasticache-fips.us-west-1.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1153,6 +1299,7 @@ }, "elasticbeanstalk" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1164,6 +1311,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1175,12 +1323,14 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1204,6 +1354,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1231,6 +1382,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "{service}.{region}.{dnsSuffix}" @@ -1292,6 +1444,7 @@ }, "hostname" : "es-fips.us-west-1.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, 
"us-east-2" : { }, @@ -1313,6 +1466,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1322,6 +1476,7 @@ }, "firehose" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1329,6 +1484,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1360,10 +1516,15 @@ "fsx" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1402,6 +1563,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1411,6 +1573,7 @@ }, "glue" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1418,9 +1581,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1433,19 +1598,31 @@ }, "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } }, "isRegionalized" : true }, + "groundstation" : { + "endpoints" : { + "us-east-2" : { }, + "us-west-2" : { } + } + }, "guardduty" : { "defaults" : { "protocols" : [ "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1503,7 +1680,9 @@ "ap-south-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + 
"eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1522,11 +1701,16 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1540,12 +1724,89 @@ "us-west-2" : { } } }, + "iotevents" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "ioteventsdata" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "data.iotevents.ap-northeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "data.iotevents.ap-southeast-2.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "data.iotevents.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "data.iotevents.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "data.iotevents.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "data.iotevents.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "data.iotevents.us-west-2.amazonaws.com" + } + } + }, + "iotthingsgraph" : { + "defaults" : { + "credentialScope" : { + "service" : "iotthingsgraph" + } + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "kafka" : { "endpoints" : { "ap-northeast-1" : 
{ }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1565,6 +1826,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1576,11 +1838,14 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1598,12 +1863,6 @@ }, "kms" : { "endpoints" : { - "ProdFips" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "hostname" : "kms-fips.ca-central-1.amazonaws.com" - }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1616,6 +1875,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1623,6 +1883,15 @@ "us-west-2" : { } } }, + "lakeformation" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "lambda" : { "endpoints" : { "ap-east-1" : { }, @@ -1637,6 +1906,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1646,16 +1916,23 @@ }, "license-manager" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, 
"us-west-2" : { } } }, @@ -1690,6 +1967,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1753,6 +2031,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1806,6 +2085,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1852,6 +2132,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1863,11 +2144,14 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1921,6 +2205,12 @@ }, "hostname" : "rds.eu-central-1.amazonaws.com" }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "rds.eu-north-1.amazonaws.com" + }, "eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" @@ -2032,6 +2322,17 @@ "us-west-2" : { } } }, + "projects.iot1click" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "ram" : { "endpoints" : { "ap-northeast-1" : { }, @@ -2041,6 +2342,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2064,6 +2366,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "{service}.{dnsSuffix}" @@ -2087,6 +2390,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { 
}, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2099,10 +2403,14 @@ "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -2120,6 +2428,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2130,8 +2439,11 @@ "robomaker" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -2187,6 +2499,7 @@ }, "runtime.sagemaker" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2194,12 +2507,39 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "runtime-fips.sagemaker.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "runtime-fips.sagemaker.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "runtime-fips.sagemaker.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "runtime-fips.sagemaker.us-west-2.amazonaws.com" + } } }, "s3" : { @@ -2232,6 +2572,7 @@ }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "s3-external-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2472,6 +2813,7 @@ }, "securityhub" : { "endpoints" : { + "ap-east-1" : { }, 
"ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2479,6 +2821,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2619,6 +2962,7 @@ }, "sms" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2630,6 +2974,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2640,6 +2985,7 @@ "snowball" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2672,6 +3018,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2721,6 +3068,7 @@ }, "hostname" : "sqs-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "queue.{dnsSuffix}" @@ -2744,6 +3092,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2765,6 +3114,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2774,6 +3124,7 @@ }, "storagegateway" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2785,6 +3136,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2806,6 +3158,12 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "dynamodb-fips.ca-central-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -2818,11 +3176,36 @@ "hostname" : 
"localhost:8000", "protocols" : [ "http" ] }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "dynamodb-fips.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "dynamodb-fips.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "dynamodb-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "dynamodb-fips.us-west-2.amazonaws.com" + } } }, "sts" : { @@ -2856,6 +3239,12 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "sts.me-south-1.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { @@ -2890,8 +3279,14 @@ }, "support" : { "endpoints" : { - "us-east-1" : { } - } + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "support.us-east-1.amazonaws.com" + } + }, + "partitionEndpoint" : "aws-global" }, "swf" : { "endpoints" : { @@ -2907,6 +3302,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2928,6 +3324,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2944,9 +3341,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -2958,7 +3357,11 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, + 
"ap-southeast-1" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, @@ -3000,12 +3403,16 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -3049,6 +3456,7 @@ }, "xray" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -3060,6 +3468,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3187,7 +3596,8 @@ "protocols" : [ "https" ] }, "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "directconnect" : { @@ -3294,6 +3704,15 @@ "cn-northwest-1" : { } } }, + "greengrass" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { } + }, + "isRegionalized" : true + }, "iam" : { "endpoints" : { "aws-cn-global" : { @@ -3313,7 +3732,8 @@ } }, "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "kinesis" : { @@ -3322,12 +3742,24 @@ "cn-northwest-1" : { } } }, + "kms" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "lambda" : { "endpoints" : { "cn-north-1" : { }, "cn-northwest-1" : { } } }, + "license-manager" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "logs" : { "endpoints" : { "cn-north-1" : { }, @@ -3467,6 +3899,17 @@ "cn-northwest-1" : { } } }, + "support" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "support.cn-north-1.amazonaws.com" + } + }, + "partitionEndpoint" : "aws-cn-global" + }, "swf" : { "endpoints" : { "cn-north-1" : { }, @@ -3549,6 +3992,7 @@ }, 
"athena" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -3593,8 +4037,15 @@ "us-gov-west-1" : { } } }, + "codebuild" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "codecommit" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -3641,6 +4092,17 @@ "us-gov-west-1" : { } } }, + "datasync" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, "directconnect" : { "endpoints" : { "us-gov-east-1" : { }, @@ -3662,6 +4124,12 @@ "dynamodb" : { "endpoints" : { "us-gov-east-1" : { }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "dynamodb.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { }, "us-gov-west-1-fips" : { "credentialScope" : { @@ -3742,6 +4210,7 @@ }, "firehose" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -3755,9 +4224,19 @@ }, "glue" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, + "greengrass" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-west-1" : { } + }, + "isRegionalized" : true + }, "guardduty" : { "defaults" : { "protocols" : [ "https" ] @@ -3767,6 +4246,11 @@ }, "isRegionalized" : true }, + "health" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "iam" : { "endpoints" : { "aws-us-gov-global" : { @@ -3843,6 +4327,7 @@ } }, "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -3852,6 +4337,16 @@ "us-gov-west-1" : { } } }, + "neptune" : { + "endpoints" : { + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + } + } + }, "organizations" : { "endpoints" : { "aws-us-gov-global" : { @@ -3869,6 +4364,11 @@ "us-gov-west-1" : { } } }, + "ram" : { + "endpoints" : { + "us-gov-west-1" : { } + 
} + }, "rds" : { "endpoints" : { "us-gov-east-1" : { }, @@ -3886,6 +4386,18 @@ "us-gov-west-1" : { } } }, + "route53" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "route53.us-gov.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, "runtime.sagemaker" : { "endpoints" : { "us-gov-west-1" : { } @@ -3948,6 +4460,30 @@ } } }, + "secretsmanager" : { + "endpoints" : { + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "secretsmanager-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "serverlessrepo" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { + "protocols" : [ "https" ] + }, + "us-gov-west-1" : { + "protocols" : [ "https" ] + } + } + }, "sms" : { "endpoints" : { "us-gov-east-1" : { }, @@ -4002,6 +4538,12 @@ }, "endpoints" : { "us-gov-east-1" : { }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "dynamodb.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { }, "us-gov-west-1-fips" : { "credentialScope" : { diff --git a/private/model/api/api.go b/private/model/api/api.go index dc5a49fc..375f97df 100644 --- a/private/model/api/api.go +++ b/private/model/api/api.go @@ -555,7 +555,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio {{ if and (.Metadata.JSONVersion) (eq .Metadata.Protocol "json") -}} JSONVersion: "{{ .Metadata.JSONVersion }}", {{- end }} - {{ if .Metadata.TargetPrefix -}} + {{ if and (.Metadata.TargetPrefix) (eq .Metadata.Protocol "json") -}} TargetPrefix: "{{ .Metadata.TargetPrefix }}", {{- end }} }, diff --git a/private/model/api/codegentest/models/restjson/0000-00-00/api-2.json b/private/model/api/codegentest/models/restjson/0000-00-00/api-2.json index 2c3b50d1..7ff2be6a 100644 --- 
a/private/model/api/codegentest/models/restjson/0000-00-00/api-2.json +++ b/private/model/api/codegentest/models/restjson/0000-00-00/api-2.json @@ -8,7 +8,6 @@ "serviceFullName":"REST JSON Service", "serviceId":"RESTJSONService", "signatureVersion":"v4", - "targetPrefix":"RESTJSONService_00000000", "uid":"RESTJSONService-0000-00-00" }, "operations":{ diff --git a/private/model/api/codegentest/models/restxml/0000-00-00/api-2.json b/private/model/api/codegentest/models/restxml/0000-00-00/api-2.json index 68843ef4..7562f504 100644 --- a/private/model/api/codegentest/models/restxml/0000-00-00/api-2.json +++ b/private/model/api/codegentest/models/restxml/0000-00-00/api-2.json @@ -8,7 +8,6 @@ "serviceFullName":"REST XML Service", "serviceId":"RESTXMLService", "signatureVersion":"v4", - "targetPrefix":"RESTXMLService_00000000", "uid":"RESTXMLService-0000-00-00" }, "operations":{ diff --git a/private/model/api/codegentest/service/restjsonservice/service.go b/private/model/api/codegentest/service/restjsonservice/service.go index ccb3b0d4..2181b1a0 100644 --- a/private/model/api/codegentest/service/restjsonservice/service.go +++ b/private/model/api/codegentest/service/restjsonservice/service.go @@ -61,8 +61,6 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio SigningRegion: signingRegion, Endpoint: endpoint, APIVersion: "0000-00-00", - - TargetPrefix: "RESTJSONService_00000000", }, handlers, ), diff --git a/private/model/api/codegentest/service/restxmlservice/service.go b/private/model/api/codegentest/service/restxmlservice/service.go index a3c4e94e..2d8bba95 100644 --- a/private/model/api/codegentest/service/restxmlservice/service.go +++ b/private/model/api/codegentest/service/restxmlservice/service.go @@ -61,8 +61,6 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio SigningRegion: signingRegion, Endpoint: endpoint, APIVersion: "0000-00-00", - - TargetPrefix: "RESTXMLService_00000000", }, handlers, ), diff 
--git a/private/model/api/customization_passes.go b/private/model/api/customization_passes.go index 0804d97a..4ff92858 100644 --- a/private/model/api/customization_passes.go +++ b/private/model/api/customization_passes.go @@ -45,32 +45,7 @@ func (a *API) setServiceAliaseName() { // customizationPasses Executes customization logic for the API by package name. func (a *API) customizationPasses() { var svcCustomizations = map[string]func(*API){ - "s3": s3Customizations, - "s3control": s3ControlCustomizations, - "cloudfront": cloudfrontCustomizations, - "rds": rdsCustomizations, - - // Disable endpoint resolving for services that require customer - // to provide endpoint them selves. - "cloudsearchdomain": disableEndpointResolving, - "iotdataplane": disableEndpointResolving, - - // MTurk smoke test is invalid. The service requires AWS account to be - // linked to Amazon Mechanical Turk Account. - "mturk": supressSmokeTest, - - // Backfill the authentication type for cognito identity and sts. - // Removes the need for the customizations in these services. - "cognitoidentity": backfillAuthType("none", - "GetId", - "GetOpenIdToken", - "UnlinkIdentity", - "GetCredentialsForIdentity", - ), - "sts": backfillAuthType("none", - "AssumeRoleWithSAML", - "AssumeRoleWithWebIdentity", - ), + "s3": s3Customizations, } for k := range mergeServices { @@ -110,6 +85,22 @@ func s3Customizations(a *API) { } } + // Decorate member references that are modeled with the wrong type. + // Specifically the case where a member was modeled as a string, but is + // expected to sent across the wire as a base64 value. + // + // e.g. 
S3's SSECustomerKey and CopySourceSSECustomerKey + for _, refName := range []string{ + "SSECustomerKey", + "CopySourceSSECustomerKey", + } { + if ref, ok := s.MemberRefs[refName]; ok { + ref.CustomTags = append(ref.CustomTags, ShapeTag{ + "marshal-as", "blob", + }) + } + } + // Expires should be a string not time.Time since the format is not // enforced by S3, and any value can be set to this field outside of the SDK. if strings.HasSuffix(name, "Output") { @@ -163,18 +154,6 @@ func s3ControlCustomizations(a *API) { } } -// cloudfrontCustomizations customized the API generation to replace values -// specific to CloudFront. -func cloudfrontCustomizations(a *API) { - // MaxItems members should always be integers - for _, s := range a.Shapes { - if ref, ok := s.MemberRefs["MaxItems"]; ok { - ref.ShapeName = "Integer" - ref.Shape = a.Shapes["Integer"] - } - } -} - // mergeServicesCustomizations references any duplicate shapes from DynamoDB func mergeServicesCustomizations(a *API) { info := mergeServices[a.PackageName()] @@ -203,36 +182,11 @@ func mergeServicesCustomizations(a *API) { } } -// rdsCustomizations are customization for the service/rds. This adds non-modeled fields used for presigning. -func rdsCustomizations(a *API) { - inputs := []string{ - "CopyDBSnapshotInput", - "CreateDBInstanceReadReplicaInput", - "CopyDBClusterSnapshotInput", - "CreateDBClusterInput", - } - for _, input := range inputs { - if ref, ok := a.Shapes[input]; ok { - ref.MemberRefs["SourceRegion"] = &ShapeRef{ - Documentation: docstring(`SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. 
This value should always have the same region as the source ARN.`), - ShapeName: "String", - Shape: a.Shapes["String"], - Ignore: true, - } - ref.MemberRefs["DestinationRegion"] = &ShapeRef{ - Documentation: docstring(`DestinationRegion is used for presigning the request to a given region.`), - ShapeName: "String", - Shape: a.Shapes["String"], - } - } - } -} - func disableEndpointResolving(a *API) { a.Metadata.NoResolveEndpoint = true } -func backfillAuthType(typ string, opNames ...string) func(*API) { +func backfillAuthType(typ AuthType, opNames ...string) func(*API) { return func(a *API) { for _, opName := range opNames { op, ok := a.Operations[opName] diff --git a/private/model/api/docstring.go b/private/model/api/docstring.go index f5889978..f028d0f0 100644 --- a/private/model/api/docstring.go +++ b/private/model/api/docstring.go @@ -3,19 +3,20 @@ package api import ( - "bytes" + "bufio" "encoding/json" "fmt" "html" + "io" "os" "regexp" "strings" xhtml "golang.org/x/net/html" + "golang.org/x/net/html/atom" ) type apiDocumentation struct { - *API Operations map[string]string Service string Shapes map[string]shapeDocumentation @@ -28,7 +29,7 @@ type shapeDocumentation struct { // AttachDocs attaches documentation from a JSON filename. 
func (a *API) AttachDocs(filename string) { - d := apiDocumentation{API: a} + var d apiDocumentation f, err := os.Open(filename) defer f.Close() @@ -40,39 +41,41 @@ func (a *API) AttachDocs(filename string) { panic(err) } - d.setup() - + d.setup(a) } -func (d *apiDocumentation) setup() { - d.API.Documentation = docstring(d.Service) +func (d *apiDocumentation) setup(a *API) { + a.Documentation = docstring(d.Service) for opName, doc := range d.Operations { - if _, ok := d.API.Operations[opName]; !ok { + if _, ok := a.Operations[opName]; !ok { panic(fmt.Sprintf("%s, doc op %q not found in API op set", - d.API.name, opName), + a.name, opName), ) } - d.API.Operations[opName].Documentation = docstring(doc) + a.Operations[opName].Documentation = docstring(doc) } - for shape, info := range d.Shapes { - if sh := d.API.Shapes[shape]; sh != nil { - sh.Documentation = docstring(info.Base) + for shapeName, docShape := range d.Shapes { + if s, ok := a.Shapes[shapeName]; ok { + s.Documentation = docstring(docShape.Base) } - for ref, doc := range info.Refs { + for ref, doc := range docShape.Refs { if doc == "" { continue } parts := strings.Split(ref, "$") if len(parts) != 2 { - fmt.Fprintf(os.Stderr, "Shape Doc %s has unexpected reference format, %q\n", shape, ref) + fmt.Fprintf(os.Stderr, + "Shape Doc %s has unexpected reference format, %q\n", + shapeName, ref) continue } - if sh := d.API.Shapes[parts[0]]; sh != nil { - if m := sh.MemberRefs[parts[1]]; m != nil { + + if s, ok := a.Shapes[parts[0]]; ok && len(s.MemberRefs) != 0 { + if m, ok := s.MemberRefs[parts[1]]; ok && m.ShapeName == shapeName { m.Documentation = docstring(doc) } } @@ -113,7 +116,6 @@ func docstring(doc string) string { doc = html.UnescapeString(doc) // Replace doc with full name if doc is empty. - doc = strings.TrimSpace(doc) if len(doc) == 0 { doc = fullname } @@ -125,17 +127,6 @@ const ( indent = " " ) -// style is what we want to prefix a string with. -// For instance,
  • Foo
  • Bar
  • , will generate -// * Foo -// * Bar -var style = map[string]string{ - "ul": indent + "* ", - "li": indent + "* ", - "code": indent, - "pre": indent, -} - // commentify converts a string to a Go comment func commentify(doc string) string { if len(doc) == 0 { @@ -160,257 +151,395 @@ func commentify(doc string) string { return "" } -// wrap returns a rewritten version of text to have line breaks -// at approximately length characters. Line breaks will only be -// inserted into whitespace. -func wrap(text string, length int, isIndented bool) string { - var buf bytes.Buffer - var last rune - var lastNL bool - var col int - - for _, c := range text { - switch c { - case '\r': // ignore this - continue // and also don't track `last` - case '\n': // ignore this too, but reset col - if col >= length || last == '\n' { - buf.WriteString("\n") - } - buf.WriteString("\n") - col = 0 - case ' ', '\t': // opportunity to split - if col >= length { - buf.WriteByte('\n') - col = 0 - if isIndented { - buf.WriteString(indent) - col += 3 - } - } else { - // We only want to write a leading space if the col is greater than zero. - // This will provide the proper spacing for documentation. - buf.WriteRune(c) - col++ // count column +func wrap(text string, length int) string { + var b strings.Builder + + s := bufio.NewScanner(strings.NewReader(text)) + for s.Scan() { + line := s.Text() + + // cleanup the line's spaces + var i int + for i = 0; i < len(line); i++ { + c := line[i] + // Ignore leading spaces, e.g indents. 
+ if !(c == ' ' || c == '\t') { + break } - default: - buf.WriteRune(c) - col++ } - lastNL = c == '\n' - _ = lastNL - last = c + line = line[:i] + strings.Join(strings.Fields(line[i:]), " ") + splitLine(&b, line, length) } - return buf.String() + + return strings.TrimRight(b.String(), "\n") } -type tagInfo struct { - tag string - key string - val string - txt string - raw string - closingTag bool +func splitLine(w stringWriter, line string, length int) { + leading := getLeadingWhitespace(line) + + line = line[len(leading):] + length -= len(leading) + + const splitOn = " " + for len(line) > length { + // Find the next whitespace to the length + idx := strings.Index(line[length:], splitOn) + if idx == -1 { + break + } + offset := length + idx + + if v := line[offset+len(splitOn):]; len(v) == 1 && strings.ContainsAny(v, `,.!?'"`) { + // Workaround for long lines with space before the punctuation mark. + break + } + + w.WriteString(leading) + w.WriteString(line[:offset]) + w.WriteByte('\n') + line = strings.TrimLeft(line[offset+len(splitOn):], " \t") + } + + if len(line) > 0 { + w.WriteString(leading) + w.WriteString(line) + } + // Add the newline back in that was stripped out by scanner. + w.WriteByte('\n') +} + +func getLeadingWhitespace(v string) string { + var o strings.Builder + for _, c := range v { + if c == ' ' || c == '\t' { + o.WriteRune(c) + } else { + break + } + } + + return o.String() } // generateDoc will generate the proper doc string for html encoded or plain text doc entries. 
func generateDoc(htmlSrc string) string { tokenizer := xhtml.NewTokenizer(strings.NewReader(htmlSrc)) - tokens := buildTokenArray(tokenizer) - scopes := findScopes(tokens) - return walk(scopes) -} - -func buildTokenArray(tokenizer *xhtml.Tokenizer) []tagInfo { - tokens := []tagInfo{} - for tt := tokenizer.Next(); tt != xhtml.ErrorToken; tt = tokenizer.Next() { - switch tt { - case xhtml.TextToken: - txt := string(tokenizer.Text()) - if len(tokens) == 0 { - info := tagInfo{ - raw: txt, - } - tokens = append(tokens, info) - } - tn, _ := tokenizer.TagName() - key, val, _ := tokenizer.TagAttr() - info := tagInfo{ - tag: string(tn), - key: string(key), - val: string(val), - txt: txt, - } - tokens = append(tokens, info) - case xhtml.StartTagToken: - tn, _ := tokenizer.TagName() - key, val, _ := tokenizer.TagAttr() - info := tagInfo{ - tag: string(tn), - key: string(key), - val: string(val), - } - tokens = append(tokens, info) - case xhtml.SelfClosingTagToken, xhtml.EndTagToken: - tn, _ := tokenizer.TagName() - key, val, _ := tokenizer.TagAttr() - info := tagInfo{ - tag: string(tn), - key: string(key), - val: string(val), - closingTag: true, - } - tokens = append(tokens, info) - } + var builder strings.Builder + if err := encodeHTMLToText(&builder, tokenizer); err != nil { + panic(fmt.Sprintf("failed to generated docs, %v", err)) } - return tokens -} - -// walk is used to traverse each scoped block. These scoped -// blocks will act as blocked text where we do most of our -// text manipulation. -func walk(scopes [][]tagInfo) string { - doc := "" - // Documentation will be chunked by scopes. - // Meaning, for each scope will be divided by one or more newlines. 
- for _, scope := range scopes { - indentStr, isIndented := priorityIndentation(scope) - block := "" - href := "" - after := false - level := 0 - lastTag := "" - for _, token := range scope { - if token.closingTag { - endl := closeTag(token, level) - block += endl - level-- - lastTag = "" - } else if token.txt == "" { - if token.val != "" { - href, after = formatText(token, "") - } - if level == 1 && isIndented { - block += indentStr - } - level++ - lastTag = token.tag - } else { - if token.txt != " " { - str, _ := formatText(token, lastTag) - block += str - if after { - block += href - after = false - } - } else { - fmt.Println(token.tag) - str, _ := formatText(tagInfo{}, lastTag) - block += str - } + + return wrap(strings.Trim(builder.String(), "\n"), 72) +} + +type stringWriter interface { + Write([]byte) (int, error) + WriteByte(byte) error + WriteRune(rune) (int, error) + WriteString(string) (int, error) +} + +func encodeHTMLToText(w stringWriter, z *xhtml.Tokenizer) error { + encoder := newHTMLTokenEncoder(w) + defer encoder.Flush() + + for { + tt := z.Next() + if tt == xhtml.ErrorToken { + if err := z.Err(); err == io.EOF { + return nil + } else if err != nil { + return err } } - if !isIndented { - block = strings.TrimPrefix(block, " ") - } - block = wrap(block, 72, isIndented) - doc += block - } - return doc -} - -// closeTag will divide up the blocks of documentation to be formated properly. -func closeTag(token tagInfo, level int) string { - switch token.tag { - case "pre", "li", "div": - return "\n" - case "p", "h1", "h2", "h3", "h4", "h5", "h6": - return "\n\n" - case "code": - // indented code is only at the 0th level. - if level == 0 { - return "\n" + + if err := encoder.Encode(z.Token()); err != nil { + return err } } - return "" } -// formatText will format any sort of text based off of a tag. It will also return -// a boolean to add the string after the text token. 
-func formatText(token tagInfo, lastTag string) (string, bool) { - switch token.tag { - case "a": - if token.val != "" { - return fmt.Sprintf(" (%s)", token.val), true +type htmlTokenHandler interface { + OnStartTagToken(xhtml.Token) htmlTokenHandler + OnEndTagToken(xhtml.Token, bool) + OnSelfClosingTagToken(xhtml.Token) + OnTextTagToken(xhtml.Token) +} + +type htmlTokenEncoder struct { + w stringWriter + depth int + handlers []tokenHandlerItem + baseHandler tokenHandlerItem +} + +type tokenHandlerItem struct { + handler htmlTokenHandler + depth int +} + +func newHTMLTokenEncoder(w stringWriter) *htmlTokenEncoder { + baseHandler := newBlockTokenHandler(w) + baseHandler.rootBlock = true + + return &htmlTokenEncoder{ + w: w, + baseHandler: tokenHandlerItem{ + handler: baseHandler, + }, + } +} + +func (e *htmlTokenEncoder) Flush() error { + e.baseHandler.handler.OnEndTagToken(xhtml.Token{Type: xhtml.TextToken}, true) + return nil +} + +func (e *htmlTokenEncoder) Encode(token xhtml.Token) error { + h := e.baseHandler + if len(e.handlers) != 0 { + h = e.handlers[len(e.handlers)-1] + } + + switch token.Type { + case xhtml.StartTagToken: + e.depth++ + + next := h.handler.OnStartTagToken(token) + if next != nil { + e.handlers = append(e.handlers, tokenHandlerItem{ + handler: next, + depth: e.depth, + }) } + + case xhtml.EndTagToken: + handlerBlockClosing := e.depth == h.depth + + h.handler.OnEndTagToken(token, handlerBlockClosing) + + // Remove all but the root handler as the handler is no longer needed. + if handlerBlockClosing { + e.handlers = e.handlers[:len(e.handlers)-1] + } + e.depth-- + + case xhtml.SelfClosingTagToken: + h.handler.OnSelfClosingTagToken(token) + + case xhtml.TextToken: + h.handler.OnTextTagToken(token) } - // We don't care about a single space nor no text. 
- if len(token.txt) == 0 || token.txt == " " { - return "", false + return nil +} + +type baseTokenHandler struct { + w stringWriter +} + +func (e *baseTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { return nil } +func (e *baseTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) {} +func (e *baseTokenHandler) OnSelfClosingTagToken(token xhtml.Token) {} +func (e *baseTokenHandler) OnTextTagToken(token xhtml.Token) { + e.w.WriteString(token.Data) +} + +type blockTokenHandler struct { + baseTokenHandler + + rootBlock bool + origWriter stringWriter + strBuilder *strings.Builder + + started bool + newlineBeforeNextBlock bool +} + +func newBlockTokenHandler(w stringWriter) *blockTokenHandler { + strBuilder := &strings.Builder{} + return &blockTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *blockTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + e.started = true + if e.newlineBeforeNextBlock { + e.w.WriteString("\n") + e.newlineBeforeNextBlock = false } - // Here we want to indent code blocks that are newlines - if lastTag == "code" { - // Greater than one, because we don't care about newlines in the beginning - block := "" - if lines := strings.Split(token.txt, "\n"); len(lines) > 1 { - for _, line := range lines { - block += indent + line - } - block += "\n" - return block, false + switch token.DataAtom { + case atom.A: + return newLinkTokenHandler(e.w, token) + case atom.Ul: + e.w.WriteString("\n") + e.newlineBeforeNextBlock = true + return newListTokenHandler(e.w) + + case atom.Div, atom.Dt, atom.P, atom.H1, atom.H2, atom.H3, atom.H4, atom.H5, atom.H6: + e.w.WriteString("\n") + e.newlineBeforeNextBlock = true + return newBlockTokenHandler(e.w) + + case atom.Pre, atom.Code: + if e.rootBlock { + e.w.WriteString("\n") + e.w.WriteString(indent) + e.newlineBeforeNextBlock = true } + return newBlockTokenHandler(e.w) } - return token.txt, 
false + + return nil } +func (e *blockTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } -// This is a parser to check what type of indention is needed. -func priorityIndentation(blocks []tagInfo) (string, bool) { - if len(blocks) == 0 { - return "", false + e.origWriter.WriteString(e.strBuilder.String()) + if e.newlineBeforeNextBlock { + e.origWriter.WriteString("\n") + e.newlineBeforeNextBlock = false } - v, ok := style[blocks[0].tag] - return v, ok + e.strBuilder.Reset() } -// Divides into scopes based off levels. -// For instance, -//

    Testing123

    • Foo
    -// This has 2 scopes, the

    and

      -func findScopes(tokens []tagInfo) [][]tagInfo { - level := 0 - scope := []tagInfo{} - scopes := [][]tagInfo{} - for _, token := range tokens { - // we will clear empty tagged tokens from the array - txt := strings.TrimSpace(token.txt) - tag := strings.TrimSpace(token.tag) - if len(txt) == 0 && len(tag) == 0 { - continue - } +func (e *blockTokenHandler) OnTextTagToken(token xhtml.Token) { + if e.newlineBeforeNextBlock { + e.w.WriteString("\n") + e.newlineBeforeNextBlock = false + } + if !e.started { + token.Data = strings.TrimLeft(token.Data, " \t\n") + } + if len(token.Data) != 0 { + e.started = true + } + e.baseTokenHandler.OnTextTagToken(token) +} - scope = append(scope, token) +type linkTokenHandler struct { + baseTokenHandler + linkToken xhtml.Token +} - // If it is a closing tag then we check what level - // we are on. If it is 0, then that means we have found a - // scoped block. - if token.closingTag { - level-- - if level == 0 { - scopes = append(scopes, scope) - scope = []tagInfo{} - } - // Check opening tags and increment the level - } else if token.txt == "" { - level++ +func newLinkTokenHandler(w stringWriter, token xhtml.Token) *linkTokenHandler { + return &linkTokenHandler{ + baseTokenHandler: baseTokenHandler{ + w: w, + }, + linkToken: token, + } +} +func (e *linkTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + if href, ok := getHTMLTokenAttr(e.linkToken.Attr, "href"); ok && len(href) != 0 { + fmt.Fprintf(e.w, " (%s)", strings.TrimSpace(href)) + } +} + +type listTokenHandler struct { + baseTokenHandler + + items int +} + +func newListTokenHandler(w stringWriter) *listTokenHandler { + return &listTokenHandler{ + baseTokenHandler: baseTokenHandler{ + w: w, + }, + } +} +func (e *listTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + switch token.DataAtom { + case atom.Li: + if e.items >= 1 { + e.w.WriteString("\n\n") } + e.items++ + return newListItemTokenHandler(e.w) + } 
+ return nil +} + +func (e *listTokenHandler) OnTextTagToken(token xhtml.Token) { + // Squash whitespace between list and items +} + +type listItemTokenHandler struct { + baseTokenHandler + + origWriter stringWriter + strBuilder *strings.Builder +} + +func newListItemTokenHandler(w stringWriter) *listItemTokenHandler { + strBuilder := &strings.Builder{} + return &listItemTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *listItemTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + switch token.DataAtom { + case atom.P: + return newBlockTokenHandler(e.w) } - // In this case, we did not run into a closing tag. This would mean - // we have plaintext for documentation. - if len(scopes) == 0 { - scopes = append(scopes, scope) + return nil +} +func (e *listItemTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + e.origWriter.WriteString(indent + "* ") + e.origWriter.WriteString(strings.TrimSpace(e.strBuilder.String())) +} + +type trimSpaceTokenHandler struct { + baseTokenHandler + + origWriter stringWriter + strBuilder *strings.Builder +} + +func newTrimSpaceTokenHandler(w stringWriter) *trimSpaceTokenHandler { + strBuilder := &strings.Builder{} + return &trimSpaceTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *trimSpaceTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + e.origWriter.WriteString(strings.TrimSpace(e.strBuilder.String())) +} + +func getHTMLTokenAttr(attr []xhtml.Attribute, name string) (string, bool) { + for _, a := range attr { + if strings.EqualFold(a.Key, name) { + return a.Val, true + } } - return scopes + return "", false } diff --git a/private/model/api/docstring_test.go b/private/model/api/docstring_test.go index 627ba472..5f5ae394 100644 --- 
a/private/model/api/docstring_test.go +++ b/private/model/api/docstring_test.go @@ -1,4 +1,4 @@ -// +build 1.6,codegen +// +build go1.8,codegen package api @@ -6,95 +6,76 @@ import ( "testing" ) -func TestNonHTMLDocGen(t *testing.T) { - doc := "Testing 1 2 3" - expected := "// Testing 1 2 3\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } -} - -func TestListsHTMLDocGen(t *testing.T) { - doc := "
      • Testing 1 2 3
      • FooBar
      " - expected := "// * Testing 1 2 3\n// * FooBar\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } - - doc = "
      • Testing 1 2 3
      • FooBar
      " - expected = "// * Testing 1 2 3\n// * FooBar\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } - - // Test leading spaces - doc = "
      • Testing 1 2 3
      • FooBar
      " - doc = docstring(doc) - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } - - // Paragraph check - doc = "
      • Testing 1 2 3

      • FooBar

      " - expected = "// * Testing 1 2 3\n// \n// * FooBar\n" - doc = docstring(doc) - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } -} - -func TestInlineCodeHTMLDocGen(t *testing.T) { - doc := "
      • Testing: 1 2 3
      • FooBar
      " - expected := "// * Testing: 1 2 3\n// * FooBar\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } -} - -func TestInlineCodeInParagraphHTMLDocGen(t *testing.T) { - doc := "

      Testing: 1 2 3

      " - expected := "// Testing: 1 2 3\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) +func TestDocstring(t *testing.T) { + cases := map[string]struct { + In string + Expect string + }{ + "non HTML": { + In: "Testing 1 2 3", + Expect: "// Testing 1 2 3", + }, + "link": { + In: `a link`, + Expect: "// a link (https://example.com)", + }, + "link with space": { + In: `a link`, + Expect: "// a link (https://example.com)", + }, + "list HTML 01": { + In: "
      • Testing 1 2 3
      • FooBar
      ", + Expect: "// * Testing 1 2 3\n// \n// * FooBar", + }, + "list HTML 02": { + In: "
      • Testing 1 2 3
      • FooBar
      ", + Expect: "// * Testing 1 2 3\n// \n// * FooBar", + }, + "list HTML leading spaces": { + In: "
      • Testing 1 2 3
      • FooBar
      ", + Expect: "// * Testing 1 2 3\n// \n// * FooBar", + }, + "list HTML paragraph": { + In: "
      • Testing 1 2 3

      • FooBar

      ", + Expect: "// * Testing 1 2 3\n// \n// * FooBar", + }, + "inline code HTML": { + In: "
      • Testing: 1 2 3
      • FooBar
      ", + Expect: "// * Testing: 1 2 3\n// \n// * FooBar", + }, + "complex list paragraph": { + In: "
      • FOO Bar

      • Xyz ABC

      ", + Expect: "// * FOO Bar\n// \n// * Xyz ABC", + }, + "inline code in paragraph": { + In: "

      Testing: 1 2 3

      ", + Expect: "// Testing: 1 2 3", + }, + "root pre": { + In: "
      Testing
      ", + Expect: "// Testing", + }, + "paragraph": { + In: "

      Testing 1 2 3

      ", + Expect: "// Testing 1 2 3", + }, + "wrap lines": { + In: "CreateSecret SecretListEntry SecretName KmsKeyId", + Expect: "// CreateSecret SecretListEntry SecretName KmsKeyId", + }, + "links with spaces": { + In: "

      Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

      ", + Expect: "// Deletes the replication configuration from the bucket. For information about\n// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)\n// in the Amazon S3 Developer Guide.", + }, } -} - -func TestEmptyPREInlineCodeHTMLDocGen(t *testing.T) { - doc := "
      Testing
      " - expected := "// Testing\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } -} - -func TestParagraph(t *testing.T) { - doc := "

      Testing 1 2 3

      " - expected := "// Testing 1 2 3\n" - doc = docstring(doc) - - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) - } -} - -func TestComplexListParagraphCode(t *testing.T) { - doc := "
      • FOO Bar

      • Xyz ABC

      " - expected := "// * FOO Bar\n// \n// * Xyz ABC\n" - doc = docstring(doc) - if expected != doc { - t.Errorf("Expected %s, but received %s", expected, doc) + for name, c := range cases { + t.Run(name, func(t *testing.T) { + t.Log("Input", c.In) + actual := docstring(c.In) + if e, a := c.Expect, actual; e != a { + t.Errorf("expect %q, got %q", e, a) + } + }) } } diff --git a/private/model/api/example.go b/private/model/api/example.go index 0031df5f..afb79115 100644 --- a/private/model/api/example.go +++ b/private/model/api/example.go @@ -54,9 +54,9 @@ var exampleCustomizations = map[string]template.FuncMap{} var exampleTmpls = template.Must(template.New("example").Funcs(exampleFuncMap).Parse(` {{ generateTypes . }} -{{ commentify (wrap .Title 80 false) }} +{{ commentify (wrap .Title 80) }} // -{{ commentify (wrap .Description 80 false) }} +{{ commentify (wrap .Description 80) }} func Example{{ .API.StructName }}_{{ .MethodName }}() { svc := {{ .API.PackageName }}.New(session.Must(session.NewSession())) input := {{ generateExampleInput . 
}} diff --git a/private/model/api/examples_builder.go b/private/model/api/examples_builder.go index b392390a..7ba4c45b 100644 --- a/private/model/api/examples_builder.go +++ b/private/model/api/examples_builder.go @@ -22,6 +22,6 @@ func (builder defaultExamplesBuilder) Imports(a *API) string { "` + SDKImportRoot + `/aws" "` + SDKImportRoot + `/aws/awserr" "` + SDKImportRoot + `/aws/session" - "` + SDKImportRoot + `/service/` + a.PackageName() + `" + "` + a.ImportPath() + `" ` } diff --git a/private/model/api/operation.go b/private/model/api/operation.go index 4e30e770..6d14a45f 100644 --- a/private/model/api/operation.go +++ b/private/model/api/operation.go @@ -23,9 +23,9 @@ type Operation struct { OutputRef ShapeRef `json:"output"` ErrorRefs []ShapeRef `json:"errors"` Paginator *Paginator - Deprecated bool `json:"deprecated"` - DeprecatedMsg string `json:"deprecatedMessage"` - AuthType string `json:"authtype"` + Deprecated bool `json:"deprecated"` + DeprecatedMsg string `json:"deprecatedMessage"` + AuthType AuthType `json:"authtype"` imports map[string]bool CustomBuildHandlers []string @@ -102,16 +102,25 @@ func (o *Operation) HasOutput() bool { return o.OutputRef.ShapeName != "" } +// AuthType provides the enumeration of AuthType trait. +type AuthType string + +// Enumeration values for AuthType trait +const ( + NoneAuthType AuthType = "none" + V4UnsignedBodyAuthType AuthType = "v4-unsigned-body" +) + // GetSigner returns the signer that should be used for a API request. 
func (o *Operation) GetSigner() string { buf := bytes.NewBuffer(nil) switch o.AuthType { - case "none": + case NoneAuthType: o.API.AddSDKImport("aws/credentials") buf.WriteString("req.Config.Credentials = credentials.AnonymousCredentials") - case "v4-unsigned-body": + case V4UnsignedBodyAuthType: o.API.AddSDKImport("aws/signer/v4") buf.WriteString("req.Handlers.Sign.Remove(v4.SignRequestHandler)\n") diff --git a/private/model/api/passes_test.go b/private/model/api/passes_test.go index 384c1241..e4f16b07 100644 --- a/private/model/api/passes_test.go +++ b/private/model/api/passes_test.go @@ -120,10 +120,10 @@ func TestUniqueInputAndOutputs(t *testing.T) { }, OutputRef: ShapeRef{ API: a, - ShapeName: op.input, + ShapeName: op.output, Shape: &Shape{ API: a, - ShapeName: op.input, + ShapeName: op.output, }, }, } @@ -145,11 +145,13 @@ func TestUniqueInputAndOutputs(t *testing.T) { a.applyShapeNameAliases() a.createInputOutputShapes() for k, v := range expected { - if a.Operations[k].InputRef.Shape.ShapeName != v[0] { - t.Errorf("Error %s case: Expected %q, but received %q", k, v[0], a.Operations[k].InputRef.Shape.ShapeName) + if e, ac := v[0], a.Operations[k].InputRef.Shape.ShapeName; e != ac { + t.Errorf("Error %s case: Expected %q, but received %q", + k, e, ac) } - if a.Operations[k].OutputRef.Shape.ShapeName != v[1] { - t.Errorf("Error %s case: Expected %q, but received %q", k, v[1], a.Operations[k].OutputRef.Shape.ShapeName) + if e, ac := v[1], a.Operations[k].OutputRef.Shape.ShapeName; e != ac { + t.Errorf("Error %s case: Expected %q, but received %q", + k, e, ac) } } }) diff --git a/private/model/api/s3manger_input.go b/private/model/api/s3manger_input.go index 1635adb6..cba08f29 100644 --- a/private/model/api/s3manger_input.go +++ b/private/model/api/s3manger_input.go @@ -21,8 +21,8 @@ func S3ManagerUploadInputGoCode(a *API) string { } a.resetImports() - a.imports["io"] = true - a.imports["time"] = true + a.AddImport("io") + a.AddImport("time") var w bytes.Buffer 
if err := s3managerUploadInputTmpl.Execute(&w, s); err != nil { diff --git a/private/model/api/service_name.go b/private/model/api/service_name.go index 63fda4d8..9d9243a9 100644 --- a/private/model/api/service_name.go +++ b/private/model/api/service_name.go @@ -14,77 +14,77 @@ func ServiceName(a *API) string { } var oldServiceNames = map[string]string{ - "migrationhub": "mgh", - "acmpca": "acm-pca", - "acm": "acm", - "alexaforbusiness": "a4b", - "apigateway": "apigateway", - "applicationautoscaling": "autoscaling", - "appstream": "appstream2", - "appsync": "appsync", - "athena": "athena", - "autoscalingplans": "autoscaling", - "autoscaling": "autoscaling", - "batch": "batch", - "budgets": "budgets", - "costexplorer": "ce", - "cloud9": "cloud9", - "clouddirectory": "clouddirectory", - "cloudformation": "cloudformation", - "cloudfront": "cloudfront", - "cloudhsm": "cloudhsm", - "cloudhsmv2": "cloudhsmv2", - "cloudsearch": "cloudsearch", - "cloudsearchdomain": "cloudsearchdomain", - "cloudtrail": "cloudtrail", - "codebuild": "codebuild", - "codecommit": "codecommit", - "codedeploy": "codedeploy", - "codepipeline": "codepipeline", - "codestar": "codestar", - "cognitoidentity": "cognito-identity", - "cognitoidentityprovider": "cognito-idp", - "cognitosync": "cognito-sync", - "comprehend": "comprehend", - "configservice": "config", - "connect": "connect", - "costandusagereportservice": "cur", - "datapipeline": "datapipeline", - "dax": "dax", - "devicefarm": "devicefarm", - "directconnect": "directconnect", - "applicationdiscoveryservice": "discovery", - "databasemigrationservice": "dms", - "directoryservice": "ds", - "dynamodb": "dynamodb", - "ec2": "ec2", - "ecr": "ecr", - "ecs": "ecs", - "eks": "eks", - "elasticache": "elasticache", - "elasticbeanstalk": "elasticbeanstalk", - "efs": "elasticfilesystem", - "elb": "elasticloadbalancing", - "elbv2": "elasticloadbalancing", - "emr": "elasticmapreduce", - "elastictranscoder": "elastictranscoder", - "ses": "email", - 
"marketplaceentitlementservice": "entitlement.marketplace", - "elasticsearchservice": "es", - "cloudwatchevents": "events", - "firehose": "firehose", - "fms": "fms", - "gamelift": "gamelift", - "glacier": "glacier", - "glue": "glue", - "greengrass": "greengrass", - "guardduty": "guardduty", - "health": "health", - "iam": "iam", - "inspector": "inspector", - "iotdataplane": "data.iot", - "iotjobsdataplane": "data.jobs.iot", - "iot": "iot", + "migrationhub": "mgh", + "acmpca": "acm-pca", + "acm": "acm", + "alexaforbusiness": "a4b", + "apigateway": "apigateway", + "applicationautoscaling": "autoscaling", + "appstream": "appstream2", + "appsync": "appsync", + "athena": "athena", + "autoscalingplans": "autoscaling", + "autoscaling": "autoscaling", + "batch": "batch", + "budgets": "budgets", + "costexplorer": "ce", + "cloud9": "cloud9", + "clouddirectory": "clouddirectory", + "cloudformation": "cloudformation", + "cloudfront": "cloudfront", + "cloudhsm": "cloudhsm", + "cloudhsmv2": "cloudhsmv2", + "cloudsearch": "cloudsearch", + "cloudsearchdomain": "cloudsearchdomain", + "cloudtrail": "cloudtrail", + "codebuild": "codebuild", + "codecommit": "codecommit", + "codedeploy": "codedeploy", + "codepipeline": "codepipeline", + "codestar": "codestar", + "cognitoidentity": "cognito-identity", + "cognitoidentityprovider": "cognito-idp", + "cognitosync": "cognito-sync", + "comprehend": "comprehend", + "configservice": "config", + "connect": "connect", + "costandusagereportservice": "cur", + "datapipeline": "datapipeline", + "dax": "dax", + "devicefarm": "devicefarm", + "directconnect": "directconnect", + "applicationdiscoveryservice": "discovery", + "databasemigrationservice": "dms", + "directoryservice": "ds", + "dynamodb": "dynamodb", + "ec2": "ec2", + "ecr": "ecr", + "ecs": "ecs", + "eks": "eks", + "elasticache": "elasticache", + "elasticbeanstalk": "elasticbeanstalk", + "efs": "elasticfilesystem", + "elb": "elasticloadbalancing", + "elbv2": "elasticloadbalancing", + "emr": 
"elasticmapreduce", + "elastictranscoder": "elastictranscoder", + "ses": "email", + "marketplaceentitlementservice": "entitlement.marketplace", + "elasticsearchservice": "es", + "cloudwatchevents": "events", + "firehose": "firehose", + "fms": "fms", + "gamelift": "gamelift", + "glacier": "glacier", + "glue": "glue", + "greengrass": "greengrass", + "guardduty": "guardduty", + "health": "health", + "iam": "iam", + "inspector": "inspector", + "iotdataplane": "data.iot", + "iotjobsdataplane": "data.jobs.iot", + "iot": "iot", "iot1clickdevicesservice": "devices.iot1click", "iot1clickprojects": "projects.iot1click", "iotanalytics": "iotanalytics", diff --git a/private/model/api/shape.go b/private/model/api/shape.go index 3838686b..8e7462e8 100644 --- a/private/model/api/shape.go +++ b/private/model/api/shape.go @@ -57,6 +57,9 @@ type ShapeRef struct { IsEventPayload bool `json:"eventpayload"` IsEventHeader bool `json:"eventheader"` + + // Collection of custom tags the shape reference includes. + CustomTags ShapeTags } // A Shape defines the definition of a shape type @@ -434,7 +437,7 @@ func (s ShapeTags) String() string { // GoTags returns the rendered tags string for the ShapeRef func (ref *ShapeRef) GoTags(toplevel bool, isRequired bool) string { - tags := ShapeTags{} + tags := append(ShapeTags{}, ref.CustomTags...) 
if ref.Location != "" { tags = append(tags, ShapeTag{"location", ref.Location}) diff --git a/private/protocol/ec2query/build.go b/private/protocol/ec2query/build.go index 2b577bcf..e8c142b7 100644 --- a/private/protocol/ec2query/build.go +++ b/private/protocol/ec2query/build.go @@ -21,7 +21,8 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, true); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed encoding EC2 Query request", err) } if !r.IsPresigned() { diff --git a/private/protocol/ec2query/unmarshal.go b/private/protocol/ec2query/unmarshal.go index ddd582fb..382d6d9f 100644 --- a/private/protocol/ec2query/unmarshal.go +++ b/private/protocol/ec2query/unmarshal.go @@ -4,7 +4,6 @@ package ec2query import ( "encoding/xml" - "io" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/request" @@ -28,7 +27,8 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding EC2 Query response", err), + awserr.New(request.ErrCodeSerialization, + "failed decoding EC2 Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -39,7 +39,11 @@ func Unmarshal(r *request.Request) { // UnmarshalMeta unmarshals response headers for the EC2 protocol. 
func UnmarshalMeta(r *request.Request) { - // TODO implement unmarshaling of request IDs + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } } type xmlErrorResponse struct { @@ -53,19 +57,21 @@ type xmlErrorResponse struct { func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { + var respErr xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding EC2 Query error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) - } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - resp.RequestID, - ) + return } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + respErr.RequestID, + ) } diff --git a/private/protocol/ec2query/unmarshal_error_test.go b/private/protocol/ec2query/unmarshal_error_test.go new file mode 100644 index 00000000..e732f103 --- /dev/null +++ b/private/protocol/ec2query/unmarshal_error_test.go @@ -0,0 +1,82 @@ +// +build go1.8 + +package ec2query + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func TestUnmarshalError(t *testing.T) { + cases := map[string]struct { + Request *request.Request + Code, Msg string + ReqID string + Status int + }{ + "ErrorResponse": { + Request: &request.Request{ + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{}, + Body: 
ioutil.NopCloser(strings.NewReader( + ` + + + codeAbc + msg123 + + + reqID123 + `)), + }, + }, + Code: "codeAbc", Msg: "msg123", + Status: 400, ReqID: "reqID123", + }, + "unknown tag": { + Request: &request.Request{ + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader( + ` + . + `)), + }, + }, + Code: request.ErrCodeSerialization, + Msg: "failed to unmarshal error message", + Status: 400, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + r := c.Request + UnmarshalError(r) + if r.Error == nil { + t.Fatalf("expect error, got none") + } + + aerr := r.Error.(awserr.RequestFailure) + if e, a := c.Code, aerr.Code(); e != a { + t.Errorf("expect %v code, got %v", e, a) + } + if e, a := c.Msg, aerr.Message(); e != a { + t.Errorf("expect %q message, got %q", e, a) + } + if e, a := c.ReqID, aerr.RequestID(); e != a { + t.Errorf("expect %v request ID, got %v", e, a) + } + if e, a := c.Status, aerr.StatusCode(); e != a { + t.Errorf("expect %v status code, got %v", e, a) + } + }) + } +} diff --git a/private/protocol/json/jsonutil/unmarshal.go b/private/protocol/json/jsonutil/unmarshal.go index b6567e4c..eb2193a1 100644 --- a/private/protocol/json/jsonutil/unmarshal.go +++ b/private/protocol/json/jsonutil/unmarshal.go @@ -1,6 +1,7 @@ package jsonutil import ( + "bytes" "encoding/base64" "encoding/json" "fmt" @@ -9,9 +10,30 @@ import ( "time" "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/private/protocol" ) +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. 
+func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + // UnmarshalJSON reads a stream and unmarshals the results in object v. func UnmarshalJSON(v interface{}, stream io.Reader) error { var out interface{} diff --git a/private/protocol/jsonrpc/jsonrpc.go b/private/protocol/jsonrpc/jsonrpc.go index e4f24d79..22781c4a 100644 --- a/private/protocol/jsonrpc/jsonrpc.go +++ b/private/protocol/jsonrpc/jsonrpc.go @@ -6,8 +6,6 @@ package jsonrpc //go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go import ( - "encoding/json" - "io" "strings" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" @@ -37,7 +35,7 @@ func Build(req *request.Request) { if req.ParamsFilled() { buf, err = jsonutil.BuildJSON(req.Params) if err != nil { - req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) return } } else { @@ -68,7 +66,7 @@ func Unmarshal(req *request.Request) { err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) if err != nil { req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding JSON RPC response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), req.HTTPResponse.StatusCode, req.RequestID, ) @@ -87,17 +85,11 @@ func UnmarshalError(req *request.Request) { defer req.HTTPResponse.Body.Close() var jsonErr jsonErrorResponse - err := json.NewDecoder(req.HTTPResponse.Body).Decode(&jsonErr) - if err == io.EOF { + err := jsonutil.UnmarshalJSONError(&jsonErr, 
req.HTTPResponse.Body) + if err != nil { req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", req.HTTPResponse.Status, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) - return - } else if err != nil { - req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding JSON RPC error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), req.HTTPResponse.StatusCode, req.RequestID, ) diff --git a/private/protocol/jsonrpc/unmarshal_err_test.go b/private/protocol/jsonrpc/unmarshal_err_test.go new file mode 100644 index 00000000..f8384229 --- /dev/null +++ b/private/protocol/jsonrpc/unmarshal_err_test.go @@ -0,0 +1,79 @@ +// +build go1.8 + +package jsonrpc + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func TestUnmarshalError_SerializationError(t *testing.T) { + cases := map[string]struct { + Request *request.Request + ExpectMsg string + ExpectBytes []byte + }{ + "empty body": { + Request: &request.Request{ + Data: &struct{}{}, + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{ + "X-Amzn-Requestid": []string{"abc123"}, + }, + Body: ioutil.NopCloser( + bytes.NewReader([]byte{}), + ), + }, + }, + ExpectMsg: "error message missing", + }, + "HTML body": { + Request: &request.Request{ + Data: &struct{}{}, + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{ + "X-Amzn-Requestid": []string{"abc123"}, + }, + Body: ioutil.NopCloser( + bytes.NewReader([]byte(``)), + ), + }, + }, + ExpectBytes: []byte(``), + ExpectMsg: "failed decoding", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + req := c.Request + + UnmarshalError(req) + if req.Error == nil { + t.Fatal("expect error, got none") + } + + aerr := req.Error.(awserr.RequestFailure) + if e, a := 
request.ErrCodeSerialization, aerr.Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + + uerr := aerr.OrigErr().(awserr.UnmarshalError) + if e, a := c.ExpectMsg, uerr.Message(); !strings.Contains(a, e) { + t.Errorf("Expect %q, in %q", e, a) + } + if e, a := c.ExpectBytes, uerr.Bytes(); !bytes.Equal(e, a) { + t.Errorf("expect:\n%v\nactual:\n%v", hex.Dump(e), hex.Dump(a)) + } + }) + } +} diff --git a/private/protocol/query/build.go b/private/protocol/query/build.go index 0624f772..093efc1d 100644 --- a/private/protocol/query/build.go +++ b/private/protocol/query/build.go @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } diff --git a/private/protocol/query/unmarshal.go b/private/protocol/query/unmarshal.go index 0a470e3d..d558aa96 100644 --- a/private/protocol/query/unmarshal.go +++ b/private/protocol/query/unmarshal.go @@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding Query response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/private/protocol/query/unmarshal_error.go b/private/protocol/query/unmarshal_error.go index cfc83d0e..9d6f859d 100644 --- a/private/protocol/query/unmarshal_error.go +++ b/private/protocol/query/unmarshal_error.go @@ -2,73 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a name 
request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. 
func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to read from query HTTP response body", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr), + awserr.New(respErr.Code, respErr.Message, nil), r.HTTPResponse.StatusCode, - r.RequestID, + reqID, ) } diff --git a/private/protocol/query/unmarshal_error_test.go b/private/protocol/query/unmarshal_error_test.go new file mode 100644 index 00000000..587f883d --- /dev/null +++ b/private/protocol/query/unmarshal_error_test.go @@ -0,0 +1,94 @@ +// +build go1.8 + +package query + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" 
+) + +func TestUnmarshalError(t *testing.T) { + cases := map[string]struct { + Request *request.Request + Code, Msg string + ReqID string + Status int + }{ + "ErrorResponse": { + Request: &request.Request{ + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader( + ` + + codeAbcmsg123 + + reqID123 + `)), + }, + }, + Code: "codeAbc", Msg: "msg123", + Status: 400, ReqID: "reqID123", + }, + "ServiceUnavailableException": { + Request: &request.Request{ + HTTPResponse: &http.Response{ + StatusCode: 502, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader( + ` + else + `)), + }, + }, + Code: "ServiceUnavailableException", + Msg: "service is unavailable", + Status: 502, + }, + "unknown tag": { + Request: &request.Request{ + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader( + ` + . + `)), + }, + }, + Code: request.ErrCodeSerialization, + Msg: "failed to unmarshal error message", + Status: 400, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + r := c.Request + UnmarshalError(r) + if r.Error == nil { + t.Fatalf("expect error, got none") + } + + aerr := r.Error.(awserr.RequestFailure) + if e, a := c.Code, aerr.Code(); e != a { + t.Errorf("expect %v code, got %v", e, a) + } + if e, a := c.Msg, aerr.Message(); e != a { + t.Errorf("expect %q message, got %q", e, a) + } + if e, a := c.ReqID, aerr.RequestID(); e != a { + t.Errorf("expect %v request ID, got %v", e, a) + } + if e, a := c.Status, aerr.StatusCode(); e != a { + t.Errorf("expect %v status code, got %v", e, a) + } + }) + } +} diff --git a/private/protocol/rest/build.go b/private/protocol/rest/build.go index ec411f87..28908d42 100644 --- a/private/protocol/rest/build.go +++ b/private/protocol/rest/build.go @@ -25,6 +25,8 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func 
init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as string but + // required to be base64 encoded in request. + if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -152,7 +162,7 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.
if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } name = strings.TrimSpace(name) @@ -170,7 +180,7 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } keyStr := strings.TrimSpace(key.String()) @@ -186,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -219,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } diff --git a/private/protocol/rest/unmarshal.go b/private/protocol/rest/unmarshal.go index f1d1bd58..c02bd775 100644 --- a/private/protocol/rest/unmarshal.go +++ b/private/protocol/rest/unmarshal.go @@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } @@ -65,7 +65,7 @@ func 
unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { str := string(b) payload.Set(reflect.ValueOf(&str)) @@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to read response body", err) return } @@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "header": err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } } @@ -202,7 +202,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if len(format) == 0 { format = protocol.RFC822TimeFormatName } - t, err := protocol.ParseTime(format, header) + t, err := protocol.ParseIbmTime(format, header) 
if err != nil { return err } diff --git a/private/protocol/restjson/restjson.go b/private/protocol/restjson/restjson.go index ba8abae0..9e34d6d7 100644 --- a/private/protocol/restjson/restjson.go +++ b/private/protocol/restjson/restjson.go @@ -6,12 +6,11 @@ package restjson //go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go import ( - "encoding/json" - "io" "strings" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil" "github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonrpc" "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest" ) @@ -59,17 +58,11 @@ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var jsonErr jsonErrorResponse - err := json.NewDecoder(r.HTTPResponse.Body).Decode(&jsonErr) - if err == io.EOF { + err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } else if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding REST JSON error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/private/protocol/restjson/unmarshal_error_test.go b/private/protocol/restjson/unmarshal_error_test.go new file mode 100644 index 00000000..deed1a74 --- /dev/null +++ b/private/protocol/restjson/unmarshal_error_test.go @@ -0,0 +1,79 @@ +// +build go1.8 + +package restjson + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func TestUnmarshalError_SerializationError(t *testing.T) { + 
cases := map[string]struct { + Request *request.Request + ExpectMsg string + ExpectBytes []byte + }{ + "empty body": { + Request: &request.Request{ + Data: &struct{}{}, + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{ + "X-Amzn-Requestid": []string{"abc123"}, + }, + Body: ioutil.NopCloser( + bytes.NewReader([]byte{}), + ), + }, + }, + ExpectMsg: "error message missing", + }, + "HTML body": { + Request: &request.Request{ + Data: &struct{}{}, + HTTPResponse: &http.Response{ + StatusCode: 400, + Header: http.Header{ + "X-Amzn-Requestid": []string{"abc123"}, + }, + Body: ioutil.NopCloser( + bytes.NewReader([]byte(``)), + ), + }, + }, + ExpectBytes: []byte(``), + ExpectMsg: "failed decoding", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + req := c.Request + + UnmarshalError(req) + if req.Error == nil { + t.Fatal("expect error, got none") + } + + aerr := req.Error.(awserr.RequestFailure) + if e, a := request.ErrCodeSerialization, aerr.Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + + uerr := aerr.OrigErr().(awserr.UnmarshalError) + if e, a := c.ExpectMsg, uerr.Message(); !strings.Contains(a, e) { + t.Errorf("Expect %q, in %q", e, a) + } + if e, a := c.ExpectBytes, uerr.Bytes(); !bytes.Equal(e, a) { + t.Errorf("expect:\n%v\nactual:\n%v", hex.Dump(e), hex.Dump(a)) + } + }) + } +} diff --git a/private/protocol/restxml/restxml.go b/private/protocol/restxml/restxml.go index e9b3f062..167f9b62 100644 --- a/private/protocol/restxml/restxml.go +++ b/private/protocol/restxml/restxml.go @@ -37,7 +37,8 @@ func Build(r *request.Request) { err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to encode rest XML request", err), + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) { err := 
xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to decode REST XML response", err), + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/private/protocol/timestamp.go b/private/protocol/timestamp.go index b7ed6c6f..275e396d 100644 --- a/private/protocol/timestamp.go +++ b/private/protocol/timestamp.go @@ -70,3 +70,16 @@ func ParseTime(formatName, value string) (time.Time, error) { panic("unknown timestamp format name, " + formatName) } } + +// ParseIbmTime - checks to see if first character of date string is a letter +// if so it tries to parse it as an RFC822 formatted date +func ParseIbmTime(formatName, value string) (time.Time, error) { + if formatName == ISO8601TimeFormatName && len(value) != 0 { + ch := value[0] + if ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') { + formatName = RFC822TimeFormatName + } + } + return ParseTime(formatName, value) + +} diff --git a/private/protocol/unmarshal_test.go b/private/protocol/unmarshal_test.go index 7f544983..26d5353c 100644 --- a/private/protocol/unmarshal_test.go +++ b/private/protocol/unmarshal_test.go @@ -76,7 +76,7 @@ func TestUnmarshalSeriaizationError(t *testing.T) { }, unmarshalFn: jsonrpc.Unmarshal, expectedError: awserr.NewRequestFailure( - awserr.New("SerializationError", "", nil), + awserr.New(request.ErrCodeSerialization, "", nil), 502, "", ), @@ -92,7 +92,7 @@ func TestUnmarshalSeriaizationError(t *testing.T) { }, unmarshalFn: ec2query.Unmarshal, expectedError: awserr.NewRequestFailure( - awserr.New("SerializationError", "", nil), + awserr.New(request.ErrCodeSerialization, "", nil), 111, "", ), @@ -111,7 +111,7 @@ func TestUnmarshalSeriaizationError(t *testing.T) { }, unmarshalFn: query.Unmarshal, expectedError: awserr.NewRequestFailure( - awserr.New("SerializationError", "", nil), + awserr.New(request.ErrCodeSerialization, 
"", nil), 1, "", ), @@ -127,7 +127,7 @@ func TestUnmarshalSeriaizationError(t *testing.T) { }, unmarshalFn: restjson.Unmarshal, expectedError: awserr.NewRequestFailure( - awserr.New("SerializationError", "", nil), + awserr.New(request.ErrCodeSerialization, "", nil), 123, "", ), @@ -143,7 +143,7 @@ func TestUnmarshalSeriaizationError(t *testing.T) { }, unmarshalFn: restxml.Unmarshal, expectedError: awserr.NewRequestFailure( - awserr.New("SerializationError", "", nil), + awserr.New(request.ErrCodeSerialization, "", nil), 456, "", ), diff --git a/private/protocol/xml/xmlutil/unmarshal.go b/private/protocol/xml/xmlutil/unmarshal.go index cc02ccb2..9c347e51 100644 --- a/private/protocol/xml/xmlutil/unmarshal.go +++ b/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -10,9 +11,27 @@ import ( "strings" "time" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. 
@@ -260,7 +279,7 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { format = protocol.ISO8601TimeFormatName } - t, err := protocol.ParseTime(format, node.Text) + t, err := protocol.ParseIbmTime(format, node.Text) if err != nil { return err } diff --git a/service/kms/api.go b/service/kms/api.go index 0fa9dafb..2ddadfd9 100644 --- a/service/kms/api.go +++ b/service/kms/api.go @@ -193,8 +193,8 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // During the connection process, AWS KMS finds the AWS CloudHSM cluster that // is associated with the custom key store, creates the connection infrastructure, // connects to the cluster, logs into the AWS CloudHSM client as the kmsuser -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// crypto user (CU), and rotates its password. +// crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// (CU), and rotates its password. // // The ConnectCustomKeyStore operation might fail for various reasons. To find // the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode @@ -268,11 +268,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. 
For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -282,7 +280,7 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore func (c *KMS) ConnectCustomKeyStore(input *ConnectCustomKeyStoreInput) (*ConnectCustomKeyStoreOutput, error) { @@ -575,11 +573,9 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. 
// // For information about the requirements for an AWS CloudHSM cluster that is @@ -589,7 +585,7 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) { @@ -668,7 +664,7 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // To perform this operation on a CMK in a different AWS account, specify the // key ARN in the value of the KeyId parameter. For more information about grants, // see Grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -878,11 +874,9 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. 
For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -892,7 +886,7 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { @@ -1671,7 +1665,7 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // // For more information about how key state affects the use of a CMK, see How // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -1901,11 +1895,15 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp // operations will fail. This action can prevent users from storing and accessing // sensitive data. // -// To find the connection state of a custom key store, use the DescribeCustomKeyStoresoperation. To reconnect a custom key store, use the ConnectCustomKeyStoreoperation. 
+// To find the connection state of a custom key store, use the DescribeCustomKeyStores +// operation. To reconnect a custom key store, use the ConnectCustomKeyStore +// operation. // // If the operation succeeds, it returns a JSON object with no properties. // // This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in AWS KMS, which combines the convenience and extensive integration +// of AWS KMS with the isolation and control of a single-tenant key store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2420,7 +2418,7 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // exact match) in your request to Decrypt the data key. Otherwise, the request // to decrypt fails with an InvalidCiphertextException. For more information, // see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -3143,10 +3141,9 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ // When calling this operation, you must specify the following values: // // * The key ID or key ARN of a CMK with no key material. Its Origin must -// be EXTERNAL. -// -// To create a CMK with no key material, call CreateKey and set the value of -// its Origin parameter to EXTERNAL. To get the Origin of a CMK, call DescribeKey.) +// be EXTERNAL. To create a CMK with no key material, call CreateKey and +// set the value of its Origin parameter to EXTERNAL. 
To get the Origin of +// a CMK, call DescribeKey.) // // * The encrypted key material. To get the public key to encrypt the key // material, call GetParametersForImport. @@ -5089,9 +5086,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // of the custom key store to the value that you specify. // // * Use the KeyStorePassword parameter tell AWS KMS the current password -// of the kmsuser (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// crypto user (CU) in the associated AWS CloudHSM cluster. You can use this -// parameter to fix connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password) +// of the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// in the associated AWS CloudHSM cluster. You can use this parameter to +// fix connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password) // that occur when AWS KMS cannot log into the associated cluster because // the kmsuser password has changed. This value does not change the password // in the AWS CloudHSM cluster. @@ -5187,11 +5184,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. 
For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -5201,7 +5196,7 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) { @@ -5600,9 +5595,9 @@ type CreateCustomKeyStoreInput struct { // CustomKeyStoreName is a required field CustomKeyStoreName *string `min:"1" type:"string" required:"true"` - // Enter the password of the kmsuser (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) - // crypto user (CU) account in the specified AWS CloudHSM cluster. AWS KMS logs - // into the cluster as this user to manage key material on your behalf. + // Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) + // in the specified AWS CloudHSM cluster. AWS KMS logs into the cluster as this + // user to manage key material on your behalf. // // This parameter tells AWS KMS the kmsuser account password; it does not change // the password in the AWS CloudHSM cluster. 
@@ -5715,7 +5710,7 @@ type CreateGrantInput struct { // Allows a cryptographic operation only when the encryption context matches // or includes the encryption context specified in this structure. For more // information about encryption context, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the AWS Key Management Service Developer Guide. + // in the AWS Key Management Service Developer Guide . Constraints *GrantConstraints `type:"structure"` // A list of grant tokens. @@ -5914,7 +5909,7 @@ type CreateKeyInput struct { // Do not set this value to true indiscriminately. // // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. + // section in the AWS Key Management Service Developer Guide . // // Use this parameter only when you include a policy in the request and you // intend to prevent the principal that is making the request from making a @@ -5978,7 +5973,7 @@ type CreateKeyInput struct { // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that // the CMK becomes unmanageable. For more information, refer to the scenario // in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the AWS Key Management Service Developer Guide. + // section of the AWS Key Management Service Developer Guide . // // * Each statement in the key policy must contain one or more principals. // The principals in the key policy must exist and be visible to AWS KMS. @@ -7902,7 +7897,7 @@ func (s *GetParametersForImportOutput) SetPublicKey(v []byte) *GetParametersForI // only by case. 
To require a fully case-sensitive encryption context, use the // kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM // or key policy. For details, see kms:EncryptionContext: (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . type GrantConstraints struct { _ struct{} `type:"structure"` diff --git a/service/kms/errors.go b/service/kms/errors.go index b2513484..e8ce42f3 100644 --- a/service/kms/errors.go +++ b/service/kms/errors.go @@ -44,11 +44,9 @@ const ( // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) - // operation. - // - // For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, - // the AWS CloudHSM cluster must have at least two active HSMs, each in a - // different Availability Zone. For the ConnectCustomKeyStore operation, + // operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey + // operations, the AWS CloudHSM cluster must have at least two active HSMs, + // each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -58,7 +56,7 @@ const ( // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // in the AWS CloudHSM User Guide. + // in the AWS CloudHSM User Guide . 
ErrCodeCloudHsmClusterInvalidConfigurationException = "CloudHsmClusterInvalidConfigurationException" // ErrCodeCloudHsmClusterNotActiveException for service response error code diff --git a/service/s3/api.go b/service/s3/api.go index 46751bbb..1ed2380b 100644 --- a/service/s3/api.go +++ b/service/s3/api.go @@ -97,6 +97,86 @@ func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultip return out, req.Send() } +const opAddLegalHold = "AddLegalHold" + +// AddLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the AddLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddLegalHold for more information on using the AddLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddLegalHoldRequest method. 
+// req, resp := client.AddLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold +func (c *S3) AddLegalHoldRequest(input *AddLegalHoldInput) (req *request.Request, output *AddLegalHoldOutput) { + op := &request.Operation{ + Name: opAddLegalHold, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?legalHold", + } + + if input == nil { + input = &AddLegalHoldInput{} + } + + output = &AddLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddLegalHold API operation for Amazon Simple Storage Service. +// +// Add a legal hold on an object. The legal hold identifiers are stored in the +// object metadata along with the timestamp of when they are POSTed to the object. +// The presence of any legal hold identifiers prevents the modification or deletion +// of the object data, even if the retention period has expired. Legal Holds +// can only be added to objects in a bucket with a protection policy. Otherwise +// a 400 error will be returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AddLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold +func (c *S3) AddLegalHold(input *AddLegalHoldInput) (*AddLegalHoldOutput, error) { + req, out := c.AddLegalHoldRequest(input) + return out, req.Send() +} + +// AddLegalHoldWithContext is the same as AddLegalHold with the addition of +// the ability to pass a context and additional request options. 
+// +// See AddLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AddLegalHoldWithContext(ctx aws.Context, input *AddLegalHoldInput, opts ...request.Option) (*AddLegalHoldOutput, error) { + req, out := c.AddLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the @@ -564,6 +644,159 @@ func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCor return out, req.Send() } +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. 
+// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLegalHold = "DeleteLegalHold" + +// DeleteLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLegalHold for more information on using the DeleteLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLegalHoldRequest method. 
+// req, resp := client.DeleteLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold +func (c *S3) DeleteLegalHoldRequest(input *DeleteLegalHoldInput) (req *request.Request, output *DeleteLegalHoldOutput) { + op := &request.Operation{ + Name: opDeleteLegalHold, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?legalHold", + } + + if input == nil { + input = &DeleteLegalHoldInput{} + } + + output = &DeleteLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLegalHold API operation for Amazon Simple Storage Service. +// +// Remove Legal hold on an object. The legal hold identifiers are stored in +// the object metadata along with the timestamp of when they are POSTed to the +// object. The presence of any legal hold identifiers prevents the modification +// or deletion of the object data, even if the retention period has expired. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold +func (c *S3) DeleteLegalHold(input *DeleteLegalHoldInput) (*DeleteLegalHoldOutput, error) { + req, out := c.DeleteLegalHoldRequest(input) + return out, req.Send() +} + +// DeleteLegalHoldWithContext is the same as DeleteLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLegalHold for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteLegalHoldWithContext(ctx aws.Context, input *DeleteLegalHoldInput, opts ...request.Option) (*DeleteLegalHoldOutput, error) { + req, out := c.DeleteLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the @@ -715,6 +948,79 @@ func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput return out, req.Send() } +const opExtendObjectRetention = "ExtendObjectRetention" + +// ExtendObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the ExtendObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExtendObjectRetention for more information on using the ExtendObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExtendObjectRetentionRequest method. 
+// req, resp := client.ExtendObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention +func (c *S3) ExtendObjectRetentionRequest(input *ExtendObjectRetentionInput) (req *request.Request, output *ExtendObjectRetentionOutput) { + op := &request.Operation{ + Name: opExtendObjectRetention, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?extendRetention", + } + + if input == nil { + input = &ExtendObjectRetentionInput{} + } + + output = &ExtendObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ExtendObjectRetention API operation for Amazon Simple Storage Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ExtendObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention +func (c *S3) ExtendObjectRetention(input *ExtendObjectRetentionInput) (*ExtendObjectRetentionOutput, error) { + req, out := c.ExtendObjectRetentionRequest(input) + return out, req.Send() +} + +// ExtendObjectRetentionWithContext is the same as ExtendObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See ExtendObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ExtendObjectRetentionWithContext(ctx aws.Context, input *ExtendObjectRetentionInput, opts ...request.Option) (*ExtendObjectRetentionOutput, error) { + req, out := c.ExtendObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the @@ -863,118 +1169,192 @@ func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput return out, req.Send() } -const opGetBucketLocation = "GetBucketLocation" +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. The "output" return +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetBucketLocation for more information on using the GetBucketLocation +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetBucketLocationRequest method. -// req, resp := client.GetBucketLocationRequest(params) +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ - Name: opGetBucketLocation, + Name: opGetBucketLifecycleConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &GetBucketLocationInput{} + input = &GetBucketLifecycleConfigurationInput{} } - output = &GetBucketLocationOutput{} + output = &GetBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) return } -// GetBucketLocation API operation for Amazon Simple Storage Service. +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Returns the region the bucket resides in. +// Returns the lifecycle configuration for the bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLocation for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) +// API operation GetBucketLifecycleConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() } -// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of // the ability to pass a context and additional request options. // -// See GetBucketLocation for details on how to use this API operation. +// See GetBucketLifecycleConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetBucketLogging = "GetBucketLogging" +const opGetBucketLocation = "GetBucketLocation" -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetBucketLogging for more information on using the GetBucketLogging +// See GetBucketLocation for more information on using the GetBucketLocation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetBucketLoggingRequest method. -// req, resp := client.GetBucketLoggingRequest(params) +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ - Name: opGetBucketLogging, + Name: opGetBucketLocation, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", + HTTPPath: "/{Bucket}?location", } if input == nil { - input = &GetBucketLoggingInput{} + input = &GetBucketLocationInput{} } - output = &GetBucketLoggingOutput{} + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} req = c.newRequest(op, input, output) return } @@ -1009,6 +1389,81 @@ func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggin return out, req.Send() } +const opGetBucketProtectionConfiguration = "GetBucketProtectionConfiguration" + +// GetBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketProtectionConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketProtectionConfiguration for more information on using the GetBucketProtectionConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketProtectionConfigurationRequest method. 
+// req, resp := client.GetBucketProtectionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration +func (c *S3) GetBucketProtectionConfigurationRequest(input *GetBucketProtectionConfigurationInput) (req *request.Request, output *GetBucketProtectionConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketProtectionConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?protection", + } + + if input == nil { + input = &GetBucketProtectionConfigurationInput{} + } + + output = &GetBucketProtectionConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketProtectionConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the protection configuration of a bucket.EnablePermanentRetention +// flag will only be returned if the flag is set to true for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketProtectionConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration +func (c *S3) GetBucketProtectionConfiguration(input *GetBucketProtectionConfigurationInput) (*GetBucketProtectionConfigurationOutput, error) { + req, out := c.GetBucketProtectionConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketProtectionConfigurationWithContext is the same as GetBucketProtectionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketProtectionConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketProtectionConfigurationWithContext(ctx aws.Context, input *GetBucketProtectionConfigurationInput, opts ...request.Option) (*GetBucketProtectionConfigurationOutput, error) { + req, out := c.GetBucketProtectionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the @@ -1528,6 +1983,80 @@ func (c *S3) ListBucketsExtendedPagesWithContext(ctx aws.Context, input *ListBuc return p.Err() } +const opListLegalHolds = "ListLegalHolds" + +// ListLegalHoldsRequest generates a "aws/request.Request" representing the +// client's request for the ListLegalHolds operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListLegalHolds for more information on using the ListLegalHolds +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListLegalHoldsRequest method. 
+// req, resp := client.ListLegalHoldsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds +func (c *S3) ListLegalHoldsRequest(input *ListLegalHoldsInput) (req *request.Request, output *ListLegalHoldsOutput) { + op := &request.Operation{ + Name: opListLegalHolds, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legalHold", + } + + if input == nil { + input = &ListLegalHoldsInput{} + } + + output = &ListLegalHoldsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListLegalHolds API operation for Amazon Simple Storage Service. +// +// Returns a list of legal holds on an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListLegalHolds for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds +func (c *S3) ListLegalHolds(input *ListLegalHoldsInput) (*ListLegalHoldsOutput, error) { + req, out := c.ListLegalHoldsRequest(input) + return out, req.Send() +} + +// ListLegalHoldsWithContext is the same as ListLegalHolds with the addition of +// the ability to pass a context and additional request options. +// +// See ListLegalHolds for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListLegalHoldsWithContext(ctx aws.Context, input *ListLegalHoldsInput, opts ...request.Option) (*ListLegalHoldsOutput, error) { + req, out := c.ListLegalHoldsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the @@ -2075,184 +2604,337 @@ func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput return out, req.Send() } -const opPutBucketLogging = "PutBucketLogging" +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" -// PutBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLogging operation. The "output" return +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketLogging for more information on using the PutBucketLogging +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketLoggingRequest method. -// req, resp := client.PutBucketLoggingRequest(params) +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ - Name: opPutBucketLogging, + Name: opPutBucketLifecycleConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?logging", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &PutBucketLoggingInput{} + input = &PutBucketLifecycleConfigurationInput{} } - output = &PutBucketLoggingOutput{} + output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketLogging API operation for Amazon Simple Storage Service. +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the lifecycle configuration for a bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLogging for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() } -// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketLogging for details on how to use this API operation. +// See PutBucketLifecycleConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opPutObject = "PutObject" +const opPutBucketLogging = "PutBucketLogging" -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObject for more information on using the PutObject +// See PutBucketLogging for more information on using the PutBucketLogging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ - Name: opPutObject, + Name: opPutBucketLogging, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?logging", } if input == nil { - input = &PutObjectInput{} + input = &PutBucketLoggingInput{} } - output = &PutObjectOutput{} + output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutObject API operation for Amazon Simple Storage Service. -// -// Adds an object to a bucket. +// PutBucketLogging API operation for Amazon Simple Storage Service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) +// API operation PutBucketLogging for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) return out, req.Send() } -// PutObjectWithContext is the same as PutObject with the addition of +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of // the ability to pass a context and additional request options. // -// See PutObject for details on how to use this API operation. +// See PutBucketLogging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObjectAcl = "PutObjectAcl" +const opPutBucketProtectionConfiguration = "PutBucketProtectionConfiguration" -// PutObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectAcl operation. The "output" return +// PutBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketProtectionConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See PutObjectAcl for more information on using the PutObjectAcl +// See PutBucketProtectionConfiguration for more information on using the PutBucketProtectionConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectAclRequest method. -// req, resp := client.PutObjectAclRequest(params) +// // Example sending a request using the PutBucketProtectionConfigurationRequest method. +// req, resp := client.PutBucketProtectionConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration +func (c *S3) PutBucketProtectionConfigurationRequest(input *PutBucketProtectionConfigurationInput) (req *request.Request, output *PutBucketProtectionConfigurationOutput) { op := &request.Operation{ - Name: opPutObjectAcl, + Name: opPutBucketProtectionConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", + HTTPPath: "/{Bucket}?protection", + } + + if input == nil { + input = &PutBucketProtectionConfigurationInput{} + } + + output = &PutBucketProtectionConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketProtectionConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the protection configuration of an existing bucket. 
EnablePermanentRetention +// is optional and if not included is considered to be false. Once set to true, +// must be included in any subsequent PUT Bucket?protection requests for that +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketProtectionConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration +func (c *S3) PutBucketProtectionConfiguration(input *PutBucketProtectionConfigurationInput) (*PutBucketProtectionConfigurationOutput, error) { + req, out := c.PutBucketProtectionConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketProtectionConfigurationWithContext is the same as PutBucketProtectionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketProtectionConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketProtectionConfigurationWithContext(ctx aws.Context, input *PutBucketProtectionConfigurationInput, opts ...request.Option) (*PutBucketProtectionConfigurationOutput, error) { + req, out := c.PutBucketProtectionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. 
+// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. 
+// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", } if input == nil { @@ -2302,6 +2984,86 @@ func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, return out, req.Send() } +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. 
+// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUploadPart = "UploadPart" // UploadPartRequest generates a "aws/request.Request" representing the @@ -2616,6 +3378,93 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } +type AddLegalHoldInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // RetentionLegalHoldId is a required field + RetentionLegalHoldId *string `location:"querystring" locationName:"add" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RetentionLegalHoldId == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AddLegalHoldInput) SetBucket(v string) *AddLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *AddLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *AddLegalHoldInput) SetKey(v string) *AddLegalHoldInput { + s.Key = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. 
+func (s *AddLegalHoldInput) SetRetentionLegalHoldId(v string) *AddLegalHoldInput { + s.RetentionLegalHoldId = &v + return s +} + +type AddLegalHoldOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddLegalHoldOutput) GoString() string { + return s.String() +} + type Bucket struct { _ struct{} `type:"structure"` @@ -2729,6 +3578,114 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } +type BucketProtectionDefaultRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionDefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionDefaultRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionDefaultRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionDefaultRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. 
+func (s *BucketProtectionDefaultRetention) SetDays(v int64) *BucketProtectionDefaultRetention { + s.Days = &v + return s +} + +type BucketProtectionMaximumRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionMaximumRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionMaximumRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionMaximumRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMaximumRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *BucketProtectionMaximumRetention) SetDays(v int64) *BucketProtectionMaximumRetention { + s.Days = &v + return s +} + +type BucketProtectionMinimumRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionMinimumRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionMinimumRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionMinimumRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMinimumRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. 
+func (s *BucketProtectionMinimumRetention) SetDays(v int64) *BucketProtectionMinimumRetention { + s.Days = &v + return s +} + type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -2897,6 +3854,25 @@ type CompleteMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Date on which it will be legal to delete or modify the object. This field + // can only be specified if Retention-Directive is REPLACE. You can only specify + // this or the Retention-Period header. If both are specified a 400 error will + // be returned. If neither is specified the bucket's DefaultRetention period + // will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + // A single legal hold to apply to the object. This field can only be specified + // if Retention-Directive is REPLACE. A legal hold is a character long string + // of max length 64. The object cannot be overwritten or deleted until all legal + // holds associated with the object are removed. + RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. 
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -2967,6 +3943,24 @@ func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultip return s } +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *CompleteMultipartUploadInput) SetRetentionExpirationDate(v time.Time) *CompleteMultipartUploadInput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *CompleteMultipartUploadInput) SetRetentionLegalHoldId(v string) *CompleteMultipartUploadInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CompleteMultipartUploadInput) SetRetentionPeriod(v int64) *CompleteMultipartUploadInput { + s.RetentionPeriod = &v + return s +} + // SetUploadId sets the UploadId field's value. func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { s.UploadId = &v @@ -3182,7 +4176,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
// Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -3220,6 +4214,33 @@ type CopyObjectInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // This header controls how the Protection state of the source object is copied + // to the destination object.If copied, the retention period and all legal holds + // are copied onto the new object. The legal hold date's is set to the date + // of the copy. + RetentionDirective *string `location:"header" locationName:"Retention-Directive" type:"string" enum:"RetentionDirective"` + + // Date on which it will be legal to delete or modify the object. This field + // can only be specified if Retention-Directive is REPLACE. You can only specify + // this or the Retention-Period header. If both are specified a 400 error will + // be returned. If neither is specified the bucket's DefaultRetention period + // will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + // A single legal hold to apply to the object. This field can only be specified + // if Retention-Directive is REPLACE. A legal hold is a character long string + // of max length 64. The object cannot be overwritten or deleted until all legal + // holds associated with the object are removed. + RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"` + + // Retention period to store on the object in seconds. The object can be neither + // overwritten nor deleted until the amount of time specified in the retention + // period has elapsed. If this field and Retention-Expiration-Date are specified + // a 400 error is returned. If neither is specified the bucket's DefaultRetention + // period will be used. 
0 is a legal value assuming the bucket's minimum retention + // period is also 0. + RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` @@ -3228,7 +4249,7 @@ type CopyObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -3456,6 +4477,30 @@ func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { return s } +// SetRetentionDirective sets the RetentionDirective field's value. +func (s *CopyObjectInput) SetRetentionDirective(v string) *CopyObjectInput { + s.RetentionDirective = &v + return s +} + +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *CopyObjectInput) SetRetentionExpirationDate(v time.Time) *CopyObjectInput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *CopyObjectInput) SetRetentionLegalHoldId(v string) *CopyObjectInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. 
+func (s *CopyObjectInput) SetRetentionPeriod(v int64) *CopyObjectInput { + s.RetentionPeriod = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { s.SSECustomerAlgorithm = &v @@ -3926,7 +4971,7 @@ type CreateMultipartUploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -4322,18 +5367,124 @@ type DeleteBucketCorsInput struct { } // String returns the string representation -func (s DeleteBucketCorsInput) String() string { +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketCorsInput) GoString() string { +func (s DeleteBucketLifecycleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } @@ -4348,58 +5499,87 @@ func (s *DeleteBucketCorsInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { s.Bucket = &v return s } -func (s *DeleteBucketCorsInput) getBucket() (v string) { +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -type DeleteBucketCorsOutput struct { +type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteBucketCorsOutput) String() string { +func (s DeleteBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketCorsOutput) GoString() string { +func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -type DeleteBucketInput struct { +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteLegalHoldInput struct { _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // RetentionLegalHoldId is a required field + RetentionLegalHoldId *string `location:"querystring" locationName:"remove" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketInput) String() string { +func (s DeleteLegalHoldInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInput) GoString() string { +func (s DeleteLegalHoldInput) GoString() string { return s.String() } // Validate inspects the 
fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} +func (s *DeleteLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLegalHoldInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } if s.Bucket != nil && len(*s.Bucket) < 1 { invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RetentionLegalHoldId == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId")) + } if invalidParams.Len() > 0 { return invalidParams @@ -4408,29 +5588,41 @@ func (s *DeleteBucketInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { +func (s *DeleteLegalHoldInput) SetBucket(v string) *DeleteLegalHoldInput { s.Bucket = &v return s } -func (s *DeleteBucketInput) getBucket() (v string) { +func (s *DeleteLegalHoldInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -type DeleteBucketOutput struct { +// SetKey sets the Key field's value. +func (s *DeleteLegalHoldInput) SetKey(v string) *DeleteLegalHoldInput { + s.Key = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. 
+func (s *DeleteLegalHoldInput) SetRetentionLegalHoldId(v string) *DeleteLegalHoldInput { + s.RetentionLegalHoldId = &v + return s +} + +type DeleteLegalHoldOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteBucketOutput) String() string { +func (s DeleteLegalHoldOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { +func (s DeleteLegalHoldOutput) GoString() string { return s.String() } @@ -4787,6 +5979,136 @@ func (s *Error) SetVersionId(v string) *Error { return s } +type ExtendObjectRetentionInput struct { + _ struct{} `type:"structure"` + + // Additional time, in seconds, to add to the existing retention period for + // the object. If this field and New-Retention-Time and/or New-Retention-Expiration-Date + // are specified, a 400 error will be returned. If none of the Request Headers + // are specified, a 400 error will be returned to the user. The retention period + // of an object may be extended up to bucket maximum retention period from the + // time of the request. + AdditionalRetentionPeriod *int64 `location:"header" locationName:"Additional-Retention-Period" type:"integer"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Retention Period in seconds for the object. The Retention will be enforced + // from the current time until current time + the value in this header. This + // value has to be within the ranges defined for the bucket. + ExtendRetentionFromCurrentTime *int64 `location:"header" locationName:"Extend-Retention-From-Current-Time" type:"integer"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A new retention date to use for the object in place of the existing retention + // date. 
If this value is less than the existing value stored for the object, + // a 400 error will be returned. If this field and Additional-Retention-Period + // and/or New-Retention-Period and/or Extend-Retention-From-Current-Time are + // specified, a 400 error will be returned. If none of the Request Headers are + // specified, a 400 error will be returned to the user. The retention period + // of an object may be extended up to bucket maximum retention period from the + // time of the request. + NewRetentionExpirationDate *time.Time `location:"header" locationName:"New-Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + // Retention period, in seconds, to use for the object in place of the existing + // retention period stored for the object. If this value is less than the existing + // value stored for the object, a 400 error will be returned. If this field + // and Additional-Retention-Period and/or New-Retention-Expiration-Date are + // specified, a 400 error will be returned. If none of the Request Headers are + // specified, a 400 error will be returned. + NewRetentionPeriod *int64 `location:"header" locationName:"New-Retention-Period" type:"integer"` +} + +// String returns the string representation +func (s ExtendObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtendObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExtendObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExtendObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalRetentionPeriod sets the AdditionalRetentionPeriod field's value. +func (s *ExtendObjectRetentionInput) SetAdditionalRetentionPeriod(v int64) *ExtendObjectRetentionInput { + s.AdditionalRetentionPeriod = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ExtendObjectRetentionInput) SetBucket(v string) *ExtendObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *ExtendObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExtendRetentionFromCurrentTime sets the ExtendRetentionFromCurrentTime field's value. +func (s *ExtendObjectRetentionInput) SetExtendRetentionFromCurrentTime(v int64) *ExtendObjectRetentionInput { + s.ExtendRetentionFromCurrentTime = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ExtendObjectRetentionInput) SetKey(v string) *ExtendObjectRetentionInput { + s.Key = &v + return s +} + +// SetNewRetentionExpirationDate sets the NewRetentionExpirationDate field's value. +func (s *ExtendObjectRetentionInput) SetNewRetentionExpirationDate(v time.Time) *ExtendObjectRetentionInput { + s.NewRetentionExpirationDate = &v + return s +} + +// SetNewRetentionPeriod sets the NewRetentionPeriod field's value. 
+func (s *ExtendObjectRetentionInput) SetNewRetentionPeriod(v int64) *ExtendObjectRetentionInput { + s.NewRetentionPeriod = &v + return s +} + +type ExtendObjectRetentionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExtendObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtendObjectRetentionOutput) GoString() string { + return s.String() +} + type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -4932,6 +6254,75 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Currently only one Rule allowed. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + type GetBucketLocationInput struct { _ struct{} `type:"structure"` @@ -4990,17 +6381,88 @@ func (s GetBucketLocationOutput) String() string { } // GoString returns the string representation -func (s GetBucketLocationOutput) GoString() string { +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { return s.String() } -// SetLocationConstraint sets the LocationConstraint field's value. 
-func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { - s.LocationConstraint = &v +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v return s } -type GetBucketLoggingInput struct { +type GetBucketProtectionConfigurationInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -5008,18 +6470,18 @@ type GetBucketLoggingInput struct { } // String returns the string representation -func (s GetBucketLoggingInput) String() string { +func (s GetBucketProtectionConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { +func (s GetBucketProtectionConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} +func (s *GetBucketProtectionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketProtectionConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } @@ -5034,40 +6496,38 @@ func (s *GetBucketLoggingInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { +func (s *GetBucketProtectionConfigurationInput) SetBucket(v string) *GetBucketProtectionConfigurationInput { s.Bucket = &v return s } -func (s *GetBucketLoggingInput) getBucket() (v string) { +func (s *GetBucketProtectionConfigurationInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -type GetBucketLoggingOutput struct { - _ struct{} `type:"structure"` +type GetBucketProtectionConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ProtectionConfiguration"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. - LoggingEnabled *LoggingEnabled `type:"structure"` + // Bucket protection configuration + ProtectionConfiguration *ProtectionConfiguration `type:"structure"` } // String returns the string representation -func (s GetBucketLoggingOutput) String() string { +func (s GetBucketProtectionConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLoggingOutput) GoString() string { +func (s GetBucketProtectionConfigurationOutput) GoString() string { return s.String() } -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { - s.LoggingEnabled = v +// SetProtectionConfiguration sets the ProtectionConfiguration field's value. +func (s *GetBucketProtectionConfigurationOutput) SetProtectionConfiguration(v *ProtectionConfiguration) *GetBucketProtectionConfigurationOutput { + s.ProtectionConfiguration = v return s } @@ -5260,7 +6720,7 @@ type GetObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -5479,6 +6939,14 @@ type GetObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` + IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string"` + + // This header is only included if an object has transition metadata. This header + // will indicate the transition storage class and time of transition. If this + // header and the x-amz-restore header are both included, this header will indicate + // the time at which the object was originally archived. + IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"` + // Last modified date of the object LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` @@ -5504,6 +6972,20 @@ type GetObjectOutput struct { // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + // Date on which it will be legal to delete or modify the object. You can only + // specify this or the Retention-Period header. If both are specified a 400 + // error will be returned. If neither is specified the bucket's DefaultRetention + // period will be used. 
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. + RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // If server-side encryption with a customer-provided encryption key was requested, // the response will include this header confirming the encryption algorithm // used. @@ -5624,6 +7106,18 @@ func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { return s } +// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value. +func (s *GetObjectOutput) SetIBMRestoredCopyStorageClass(v string) *GetObjectOutput { + s.IBMRestoredCopyStorageClass = &v + return s +} + +// SetIBMTransition sets the IBMTransition field's value. +func (s *GetObjectOutput) SetIBMTransition(v string) *GetObjectOutput { + s.IBMTransition = &v + return s +} + // SetLastModified sets the LastModified field's value. func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { s.LastModified = &v @@ -5666,6 +7160,24 @@ func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { return s } +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *GetObjectOutput) SetRetentionExpirationDate(v time.Time) *GetObjectOutput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value. 
+func (s *GetObjectOutput) SetRetentionLegalHoldCount(v int64) *GetObjectOutput { + s.RetentionLegalHoldCount = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *GetObjectOutput) SetRetentionPeriod(v int64) *GetObjectOutput { + s.RetentionPeriod = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { s.SSECustomerAlgorithm = &v @@ -5714,6 +7226,44 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + type Grant struct { _ struct{} `type:"structure"` @@ -5962,7 +7512,7 @@ type HeadObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -6139,6 +7689,14 @@ type HeadObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` + IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string"` + + // This header is only included if an object has transition metadata. This header + // will indicate the transition storage class and time of transition. If this + // header and the x-amz-restore header are both included, this header will indicate + // the time at which the object was originally archived. + IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"` + // Last modified date of the object LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` @@ -6164,6 +7722,20 @@ type HeadObjectOutput struct { // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + // Date on which it will be legal to delete or modify the object. You can only + // specify this or the Retention-Period header. If both are specified a 400 + // error will be returned. If neither is specified the bucket's DefaultRetention + // period will be used. 
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. + RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // If server-side encryption with a customer-provided encryption key was requested, // the response will include this header confirming the encryption algorithm // used. @@ -6269,6 +7841,18 @@ func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { return s } +// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value. +func (s *HeadObjectOutput) SetIBMRestoredCopyStorageClass(v string) *HeadObjectOutput { + s.IBMRestoredCopyStorageClass = &v + return s +} + +// SetIBMTransition sets the IBMTransition field's value. +func (s *HeadObjectOutput) SetIBMTransition(v string) *HeadObjectOutput { + s.IBMTransition = &v + return s +} + // SetLastModified sets the LastModified field's value. func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { s.LastModified = &v @@ -6311,78 +7895,314 @@ func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { return s } +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *HeadObjectOutput) SetRetentionExpirationDate(v time.Time) *HeadObjectOutput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value. 
+func (s *HeadObjectOutput) SetRetentionLegalHoldCount(v int64) *HeadObjectOutput { + s.RetentionLegalHoldCount = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *HeadObjectOutput) SetRetentionPeriod(v int64) *HeadObjectOutput { + s.RetentionPeriod = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { s.SSECustomerAlgorithm = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { - s.SSECustomerKeyMD5 = &v - return s +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. 
If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +type LegalHold struct { + _ struct{} `type:"structure"` + + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s LegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LegalHold) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LegalHold) SetDate(v time.Time) *LegalHold { + s.Date = &v + return s +} + +// SetID sets the ID field's value. +func (s *LegalHold) SetID(v string) *LegalHold { + s.ID = &v + return s +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Currently only one Rule allowed. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*LifecycleRule) *LifecycleConfiguration { + s.Rules = v + return s +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. 
+ // + // Filter is a required field + Filter *LifecycleRuleFilter `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Currently only one Transition allowed, also Date and Days fields are mutually + // exclusive. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { - s.SSEKMSKeyId = &v +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { - s.ServerSideEncryption = &v +// SetFilter sets the Filter field's value. 
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { - s.StorageClass = &v +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { - s.VersionId = &v +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v return s } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { - s.WebsiteRedirectLocation = &v +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v return s } -type Initiator struct { +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { _ struct{} `type:"structure"` - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` + // Empty prefix allowed only. + Prefix *string `type:"string"` } // String returns the string representation -func (s Initiator) String() string { +func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Initiator) GoString() string { +func (s LifecycleRuleFilter) GoString() string { return s.String() } -// SetDisplayName sets the DisplayName field's value. 
-func (s *Initiator) SetDisplayName(v string) *Initiator { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Initiator) SetID(v string) *Initiator { - s.ID = &v +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v return s } @@ -6560,6 +8380,119 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } +type ListLegalHoldsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListLegalHoldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLegalHoldsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListLegalHoldsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLegalHoldsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListLegalHoldsInput) SetBucket(v string) *ListLegalHoldsInput { + s.Bucket = &v + return s +} + +func (s *ListLegalHoldsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
+func (s *ListLegalHoldsInput) SetKey(v string) *ListLegalHoldsInput { + s.Key = &v + return s +} + +type ListLegalHoldsOutput struct { + _ struct{} `type:"structure"` + + CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + LegalHolds []*LegalHold `type:"list"` + + // Retention period to store on the object in seconds. The object can be neither + // overwritten nor deleted until the amount of time specified in the retention + // period has elapsed. If this field and Retention-Expiration-Date are specified + // a 400 error is returned. If neither is specified the bucket's DefaultRetention + // period will be used. 0 is a legal value assuming the bucket's minimum retention + // period is also 0. + RetentionPeriod *int64 `type:"integer"` + + RetentionPeriodExpirationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ListLegalHoldsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLegalHoldsOutput) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *ListLegalHoldsOutput) SetCreateTime(v time.Time) *ListLegalHoldsOutput { + s.CreateTime = &v + return s +} + +// SetLegalHolds sets the LegalHolds field's value. +func (s *ListLegalHoldsOutput) SetLegalHolds(v []*LegalHold) *ListLegalHoldsOutput { + s.LegalHolds = v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *ListLegalHoldsOutput) SetRetentionPeriod(v int64) *ListLegalHoldsOutput { + s.RetentionPeriod = &v + return s +} + +// SetRetentionPeriodExpirationDate sets the RetentionPeriodExpirationDate field's value. 
+func (s *ListLegalHoldsOutput) SetRetentionPeriodExpirationDate(v time.Time) *ListLegalHoldsOutput { + s.RetentionPeriodExpirationDate = &v + return s +} + type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` @@ -7630,6 +9563,113 @@ func (s *Part) SetSize(v int64) *Part { return s } +type ProtectionConfiguration struct { + _ struct{} `type:"structure"` + + // Default retention period for an object, if a PUT of an object does not specify + // a retention period this value will be converted to seconds and used. + // + // DefaultRetention is a required field + DefaultRetention *BucketProtectionDefaultRetention `type:"structure" required:"true"` + + // Enable permanent retention for an object. + EnablePermanentRetention *bool `type:"boolean"` + + // Maximum retention period for an object, if a PUT of an object specifies a + // longer retention period the PUT object will fail. + // + // MaximumRetention is a required field + MaximumRetention *BucketProtectionMaximumRetention `type:"structure" required:"true"` + + // Minimum retention period for an object, if a PUT of an object specifies a + // shorter retention period the PUT object will fail. + // + // MinimumRetention is a required field + MinimumRetention *BucketProtectionMinimumRetention `type:"structure" required:"true"` + + // Retention status of a bucket. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"BucketProtectionStatus"` +} + +// String returns the string representation +func (s ProtectionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProtectionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ProtectionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProtectionConfiguration"} + if s.DefaultRetention == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultRetention")) + } + if s.MaximumRetention == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumRetention")) + } + if s.MinimumRetention == nil { + invalidParams.Add(request.NewErrParamRequired("MinimumRetention")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.DefaultRetention != nil { + if err := s.DefaultRetention.Validate(); err != nil { + invalidParams.AddNested("DefaultRetention", err.(request.ErrInvalidParams)) + } + } + if s.MaximumRetention != nil { + if err := s.MaximumRetention.Validate(); err != nil { + invalidParams.AddNested("MaximumRetention", err.(request.ErrInvalidParams)) + } + } + if s.MinimumRetention != nil { + if err := s.MinimumRetention.Validate(); err != nil { + invalidParams.AddNested("MinimumRetention", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ProtectionConfiguration) SetDefaultRetention(v *BucketProtectionDefaultRetention) *ProtectionConfiguration { + s.DefaultRetention = v + return s +} + +// SetEnablePermanentRetention sets the EnablePermanentRetention field's value. +func (s *ProtectionConfiguration) SetEnablePermanentRetention(v bool) *ProtectionConfiguration { + s.EnablePermanentRetention = &v + return s +} + +// SetMaximumRetention sets the MaximumRetention field's value. +func (s *ProtectionConfiguration) SetMaximumRetention(v *BucketProtectionMaximumRetention) *ProtectionConfiguration { + s.MaximumRetention = v + return s +} + +// SetMinimumRetention sets the MinimumRetention field's value. 
+func (s *ProtectionConfiguration) SetMinimumRetention(v *BucketProtectionMinimumRetention) *ProtectionConfiguration { + s.MinimumRetention = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ProtectionConfiguration) SetStatus(v string) *ProtectionConfiguration { + s.Status = &v + return s +} + type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -7726,73 +9766,150 @@ func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { return s } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { - s.GrantReadACP = &v +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v return s } -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { - s.GrantWrite = &v - return s +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { - s.GrantWriteACP = &v +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v return s } -type PutBucketAclOutput struct { +type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketAclOutput) String() string { +func (s PutBucketCorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAclOutput) GoString() string { +func (s PutBucketCorsOutput) GoString() string { return s.String() } -type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // LifecycleConfiguration is a required field + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketCorsInput) String() string { 
+func (s PutBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketCorsInput) GoString() string { +func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } if s.Bucket != nil && len(*s.Bucket) < 1 { invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + if s.LifecycleConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleConfiguration")) } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) } } @@ -7803,35 +9920,35 @@ func (s *PutBucketCorsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { s.Bucket = &v return s } -func (s *PutBucketCorsInput) getBucket() (v string) { +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -// SetCORSConfiguration sets the CORSConfiguration field's value. 
-func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { - s.CORSConfiguration = v +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v return s } -type PutBucketCorsOutput struct { +type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketCorsOutput) String() string { +func (s PutBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketCorsOutput) GoString() string { +func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } @@ -7912,6 +10029,83 @@ func (s PutBucketLoggingOutput) GoString() string { return s.String() } +type PutBucketProtectionConfigurationInput struct { + _ struct{} `type:"structure" payload:"ProtectionConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ProtectionConfiguration is a required field + ProtectionConfiguration *ProtectionConfiguration `locationName:"ProtectionConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketProtectionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketProtectionConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketProtectionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketProtectionConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ProtectionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ProtectionConfiguration")) + } + if s.ProtectionConfiguration != nil { + if err := s.ProtectionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ProtectionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketProtectionConfigurationInput) SetBucket(v string) *PutBucketProtectionConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketProtectionConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetProtectionConfiguration sets the ProtectionConfiguration field's value. 
+func (s *PutBucketProtectionConfigurationInput) SetProtectionConfiguration(v *ProtectionConfiguration) *PutBucketProtectionConfigurationInput { + s.ProtectionConfiguration = v + return s +} + +type PutBucketProtectionConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketProtectionConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketProtectionConfigurationOutput) GoString() string { + return s.String() +} + type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -8154,6 +10348,25 @@ type PutObjectInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Date on which it will be legal to delete or modify the object. This field + // can only be specified if Retention-Directive is REPLACE. You can only specify + // this or the Retention-Period header. If both are specified a 400 error will + // be returned. If neither is specified the bucket's DefaultRetention period + // will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + // A single legal hold to apply to the object. This field can only be specified + // if Retention-Directive is REPLACE. A legal hold is a character long string + // of max length 64. The object cannot be overwritten or deleted until all legal + // holds associated with the object are removed. + RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. 
If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. + RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` @@ -8162,7 +10375,7 @@ type PutObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8339,6 +10552,24 @@ func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { return s } +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *PutObjectInput) SetRetentionExpirationDate(v time.Time) *PutObjectInput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *PutObjectInput) SetRetentionLegalHoldId(v string) *PutObjectInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *PutObjectInput) SetRetentionPeriod(v int64) *PutObjectInput { + s.RetentionPeriod = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { s.SSECustomerAlgorithm = &v @@ -8488,6 +10719,148 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` + + GlacierJobParameters *GlacierJobParameters `type:"structure"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + type TargetGrant struct { _ struct{} `type:"structure"` @@ -8534,6 +10907,49 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. 
+func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -8572,7 +10988,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8603,7 +11019,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8908,7 +11324,7 @@ type UploadPartInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -9174,6 +11590,11 @@ const ( BucketLogsPermissionWrite = "WRITE" ) +const ( + // BucketProtectionStatusRetention is a BucketProtectionStatus enum value + BucketProtectionStatusRetention = "Retention" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -9185,6 +11606,14 @@ const ( EncodingTypeUrl = "url" ) +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + const ( // MetadataDirectiveCopy is a MetadataDirective enum value MetadataDirectiveCopy = "COPY" @@ -9286,6 +11715,14 @@ const ( RequestPayerRequester = "requester" ) +const ( + // RetentionDirectiveCopy is a RetentionDirective enum value + RetentionDirectiveCopy = "COPY" + + // RetentionDirectiveReplace is a RetentionDirective enum value + RetentionDirectiveReplace = "REPLACE" +) + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -9325,6 +11762,16 @@ const ( TaggingDirectiveReplace = "REPLACE" ) +const ( + // TierBulk is a Tier enum value + TierBulk = "Bulk" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + 
TransitionStorageClassGlacier = "GLACIER" +) + const ( // TypeCanonicalUser is a Type enum value TypeCanonicalUser = "CanonicalUser" diff --git a/service/s3/bucket_location.go b/service/s3/bucket_location.go index 3f8b4f44..3b0521e8 100644 --- a/service/s3/bucket_location.go +++ b/service/s3/bucket_location.go @@ -79,7 +79,8 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } diff --git a/service/s3/bucket_location_test.go b/service/s3/bucket_location_test.go index ab1ab609..e3c35a3b 100644 --- a/service/s3/bucket_location_test.go +++ b/service/s3/bucket_location_test.go @@ -114,7 +114,7 @@ func TestWithNormalizeBucketLocation(t *testing.T) { func TestNoPopulateLocationConstraintIfProvided(t *testing.T) { s := s3.New(unit.Session) req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ - Bucket: aws.String("bucket"), + Bucket: aws.String("bucket"), CreateBucketConfiguration: &s3.CreateBucketConfiguration{}, }) if err := req.Build(); err != nil { diff --git a/service/s3/customizations.go b/service/s3/customizations.go index 892f58ec..e5e46aaa 100644 --- a/service/s3/customizations.go +++ b/service/s3/customizations.go @@ -17,7 +17,8 @@ func defaultInitClientFn(c *client.Client) { // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() @@ -30,14 +31,14 @@ func defaultInitRequestFn(r *request.Request) { // e.g. 
100-continue support for PUT requests using Go 1.6 platformRequestHandlers(r) + // md5 required switch r.Operation.Name { - case opPutBucketCors, opDeleteObjects: - //opPutBucketLifecycle, opPutBucketPolicy, - //opPutBucketTagging, opPutBucketLifecycleConfiguration, - //opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, - //opPutBucketReplication: - // These S3 operations require Content-MD5 to be set + case opPutBucketCors, opDeleteObjects, opPutBucketProtectionConfiguration, + opPutBucketLifecycleConfiguration, opCompleteMultipartUpload: r.Handlers.Build.PushBack(contentMD5) + } + // everything else + switch r.Operation.Name { case opGetBucketLocation: // GetBucketLocation has custom parsing logic r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) diff --git a/service/s3/doc_custom.go b/service/s3/doc_custom.go index 39b912c2..4b65f715 100644 --- a/service/s3/doc_custom.go +++ b/service/s3/doc_custom.go @@ -63,6 +63,20 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region diff --git a/service/s3/errors.go b/service/s3/errors.go index bea61736..931cb17b 100644 --- a/service/s3/errors.go +++ b/service/s3/errors.go @@ -33,6 +33,12 @@ const ( // The specified multipart upload does not exist. 
ErrCodeNoSuchUpload = "NoSuchUpload" + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // diff --git a/service/s3/s3crypto/decryption_client.go b/service/s3/s3crypto/decryption_client.go index f335edc1..26d8dce9 100644 --- a/service/s3/s3crypto/decryption_client.go +++ b/service/s3/s3crypto/decryption_client.go @@ -58,7 +58,7 @@ func NewDecryptionClient(prov client.ConfigProvider, options ...func(*Decryption }).decryptHandler, }, CEKRegistry: map[string]CEKEntry{ - AESGCMNoPadding: newAESGCMContentCipher, + AESGCMNoPadding: newAESGCMContentCipher, strings.Join([]string{AESCBC, AESCBCPadder.Name()}, "/"): newAESCBCContentCipher, }, PadderRegistry: map[string]Padder{ diff --git a/service/s3/s3crypto/kms_key_handler.go b/service/s3/s3crypto/kms_key_handler.go index 9c7c8bc7..5b8fa5aa 100644 --- a/service/s3/s3crypto/kms_key_handler.go +++ b/service/s3/s3crypto/kms_key_handler.go @@ -65,7 +65,7 @@ func NewKMSKeyGeneratorWithMatDesc(kmsClient kmsiface.KMSAPI, cmkID string, matd // decryptHandler := s3crypto.NewKMSWrapEntry(customKMSClient) // // svc := s3crypto.NewDecryptionClient(sess, func(svc *s3crypto.DecryptionClient) { -// svc.WrapRegistry[KMSWrap] = decryptHandler +// svc.WrapRegistry[s3crypto.KMSWrap] = decryptHandler // })) func NewKMSWrapEntry(kmsClient kmsiface.KMSAPI) WrapEntry { // These values are read only making them thread safe diff --git a/service/s3/s3iface/interface.go b/service/s3/s3iface/interface.go index 01b75c7f..a9907b44 100644 --- a/service/s3/s3iface/interface.go +++ b/service/s3/s3iface/interface.go @@ -64,6 +64,10 @@ type S3API interface { AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) 
(*s3.AbortMultipartUploadOutput, error) AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + AddLegalHold(*s3.AddLegalHoldInput) (*s3.AddLegalHoldOutput, error) + AddLegalHoldWithContext(aws.Context, *s3.AddLegalHoldInput, ...request.Option) (*s3.AddLegalHoldOutput, error) + AddLegalHoldRequest(*s3.AddLegalHoldInput) (*request.Request, *s3.AddLegalHoldOutput) + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error) CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) @@ -88,6 +92,14 @@ type S3API interface { DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error) DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteLegalHold(*s3.DeleteLegalHoldInput) (*s3.DeleteLegalHoldOutput, error) + DeleteLegalHoldWithContext(aws.Context, *s3.DeleteLegalHoldInput, ...request.Option) (*s3.DeleteLegalHoldOutput, error) + DeleteLegalHoldRequest(*s3.DeleteLegalHoldInput) (*request.Request, *s3.DeleteLegalHoldOutput) + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error) DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) @@ -96,6 +108,10 @@ type S3API 
interface { DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + ExtendObjectRetention(*s3.ExtendObjectRetentionInput) (*s3.ExtendObjectRetentionOutput, error) + ExtendObjectRetentionWithContext(aws.Context, *s3.ExtendObjectRetentionInput, ...request.Option) (*s3.ExtendObjectRetentionOutput, error) + ExtendObjectRetentionRequest(*s3.ExtendObjectRetentionInput) (*request.Request, *s3.ExtendObjectRetentionOutput) + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error) GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) @@ -104,6 +120,10 @@ type S3API interface { GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error) GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) @@ -112,6 +132,10 @@ type S3API interface { GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, 
error) GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + GetBucketProtectionConfiguration(*s3.GetBucketProtectionConfigurationInput) (*s3.GetBucketProtectionConfigurationOutput, error) + GetBucketProtectionConfigurationWithContext(aws.Context, *s3.GetBucketProtectionConfigurationInput, ...request.Option) (*s3.GetBucketProtectionConfigurationOutput, error) + GetBucketProtectionConfigurationRequest(*s3.GetBucketProtectionConfigurationInput) (*request.Request, *s3.GetBucketProtectionConfigurationOutput) + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) @@ -139,6 +163,10 @@ type S3API interface { ListBucketsExtendedPages(*s3.ListBucketsExtendedInput, func(*s3.ListBucketsExtendedOutput, bool) bool) error ListBucketsExtendedPagesWithContext(aws.Context, *s3.ListBucketsExtendedInput, func(*s3.ListBucketsExtendedOutput, bool) bool, ...request.Option) error + ListLegalHolds(*s3.ListLegalHoldsInput) (*s3.ListLegalHoldsOutput, error) + ListLegalHoldsWithContext(aws.Context, *s3.ListLegalHoldsInput, ...request.Option) (*s3.ListLegalHoldsOutput, error) + ListLegalHoldsRequest(*s3.ListLegalHoldsInput) (*request.Request, *s3.ListLegalHoldsOutput) + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) @@ -168,10 +196,18 @@ type S3API interface { PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + 
PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error) PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + PutBucketProtectionConfiguration(*s3.PutBucketProtectionConfigurationInput) (*s3.PutBucketProtectionConfigurationOutput, error) + PutBucketProtectionConfigurationWithContext(aws.Context, *s3.PutBucketProtectionConfigurationInput, ...request.Option) (*s3.PutBucketProtectionConfigurationOutput, error) + PutBucketProtectionConfigurationRequest(*s3.PutBucketProtectionConfigurationInput) (*request.Request, *s3.PutBucketProtectionConfigurationOutput) + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error) PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) @@ -180,6 +216,10 @@ type S3API interface { PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error) + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + UploadPart(*s3.UploadPartInput) 
(*s3.UploadPartOutput, error) UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) diff --git a/service/s3/s3manager/batch.go b/service/s3/s3manager/batch.go index e1f8feab..0221a700 100644 --- a/service/s3/s3manager/batch.go +++ b/service/s3/s3manager/batch.go @@ -273,7 +273,7 @@ type DeleteObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. func (iter *DeleteObjectsIterator) Next() bool { if iter.inc { @@ -458,7 +458,7 @@ type DownloadObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. func (batcher *DownloadObjectsIterator) Next() bool { if batcher.inc { @@ -497,7 +497,7 @@ type UploadObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. 
func (batcher *UploadObjectsIterator) Next() bool { if batcher.inc { diff --git a/service/s3/s3manager/download.go b/service/s3/s3manager/download.go index 364ade67..0139427c 100644 --- a/service/s3/s3manager/download.go +++ b/service/s3/s3manager/download.go @@ -99,7 +99,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl // sess := session.Must(session.NewSession()) // // // The S3 client the S3 Downloader will use -// s3Svc := s3.new(sess) +// s3Svc := s3.New(sess) // // // Create a downloader with the s3 client and default options // downloader := s3manager.NewDownloaderWithClient(s3Svc) @@ -126,7 +126,8 @@ type maxRetrier interface { } // Download downloads an object in S3 and writes the payload into w using -// concurrent GET requests. +// concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // Additional functional options can be provided to configure the individual // download. These options are copies of the Downloader instance Download is called from. @@ -148,7 +149,8 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options .. } // DownloadWithContext downloads an object in S3 and writes the payload into w -// using concurrent GET requests. +// using concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // DownloadWithContext is the same as Download with the additional support for // Context input parameters. The Context must not be nil. A nil Context will diff --git a/service/s3/s3manager/s3manageriface/interface.go b/service/s3/s3manager/s3manageriface/interface.go index bd95a984..2fbe7dd7 100644 --- a/service/s3/s3manager/s3manageriface/interface.go +++ b/service/s3/s3manager/s3manageriface/interface.go @@ -9,13 +9,21 @@ import ( "github.com/IBM/ibm-cos-sdk-go/service/s3/s3manager" ) +var _ DownloaderAPI = (*s3manager.Downloader)(nil) + // DownloaderAPI is the interface type for s3manager.Downloader. 
type DownloaderAPI interface { Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) DownloadWithContext(aws.Context, io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) } -var _ DownloaderAPI = (*s3manager.Downloader)(nil) +// DownloadWithIterator is the interface type for the contained method of the same name. +type DownloadWithIterator interface { + DownloadWithIterator(aws.Context, s3manager.BatchDownloadIterator, ...func(*s3manager.Downloader)) error +} + +var _ UploaderAPI = (*s3manager.Uploader)(nil) +var _ UploadWithIterator = (*s3manager.Uploader)(nil) // UploaderAPI is the interface type for s3manager.Uploader. type UploaderAPI interface { @@ -23,4 +31,16 @@ type UploaderAPI interface { UploadWithContext(aws.Context, *s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) } -var _ UploaderAPI = (*s3manager.Uploader)(nil) +// UploadWithIterator is the interface for uploading objects to S3 using the S3 +// upload manager. +type UploadWithIterator interface { + UploadWithIterator(aws.Context, s3manager.BatchUploadIterator, ...func(*s3manager.Uploader)) error +} + +var _ BatchDelete = (*s3manager.BatchDelete)(nil) + +// BatchDelete is the interface type for batch deleting objects from S3 using +// the S3 manager. (separated for user to compose). +type BatchDelete interface { + Delete(aws.Context, s3manager.BatchDeleteIterator) error +} diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go index ecda3887..15c9256b 100644 --- a/service/s3/s3manager/upload.go +++ b/service/s3/s3manager/upload.go @@ -96,100 +96,6 @@ func (m multiUploadError) UploadID() string { return m.uploadID } -//// UploadInput contains all input for upload requests to Amazon S3. -//type UploadInput struct { -// // The canned ACL to apply to the object. 
-// ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` -// -// Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -// -// // Specifies caching behavior along the request/reply chain. -// CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` -// -// // Specifies presentational information for the object. -// ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` -// -// // Specifies what content encodings have been applied to the object and thus -// // what decoding mechanisms must be applied to obtain the media-type referenced -// // by the Content-Type header field. -// ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` -// -// // The language the content is in. -// ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` -// -// // The base64-encoded 128-bit MD5 digest of the part data. -// ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` -// -// // A standard MIME type describing the format of the object data. -// ContentType *string `location:"header" locationName:"Content-Type" type:"string"` -// -// // The date and time at which the object is no longer cacheable. -// Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` -// -// // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. -// GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` -// -// // Allows grantee to read the object data and its metadata. -// GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` -// -// // Allows grantee to read the object ACL. -// GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` -// -// // Allows grantee to write the ACL for the applicable object. 
-// GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -// -// Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` -// -// // A map of metadata to store with the object in S3. -// Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` -// -// // Confirms that the requester knows that she or he will be charged for the -// // request. Bucket owners need not specify this parameter in their requests. -// // Documentation on downloading objects from requester pays buckets can be found -// // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html -// RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` -// -// // Specifies the algorithm to use to when encrypting the object (e.g., AES256, -// // aws:kms). -// SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` -// -// // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting -// // data. This value is used to store the object and then it is discarded; Amazon -// // does not store the encryption key. The key must be appropriate for use with -// // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm -// // header. -// SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` -// -// // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. -// // Amazon S3 uses this header for a message integrity check to ensure the encryption -// // key was transmitted without error. -// SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` -// -// // Specifies the AWS KMS key ID to use for object encryption. 
All GET and PUT -// // requests for an object protected by AWS KMS will fail if not made via SSL -// // or using SigV4. Documentation on configuring any of the officially supported -// // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version -// SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` -// -// // The Server-side encryption algorithm used when storing this object in S3 -// // (e.g., AES256, aws:kms). -// ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` -// -// // The type of storage to use for the object. Defaults to 'STANDARD'. -// StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` -// -// // The tag-set for the object. The tag-set must be encoded as URL Query parameters -// Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` -// -// // If the bucket is configured as a website, redirects requests for this object -// // to another object in the same bucket or to an external URL. Amazon S3 stores -// // the value of this header in the object metadata. -// WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -// -// // The readable body payload to send to S3. -// Body io.Reader -//} - // UploadOutput represents a response from the Upload() call. type UploadOutput struct { // The URL where the object was uploaded to. @@ -239,8 +145,13 @@ type Uploader struct { // MaxUploadParts is the max number of parts which will be uploaded to S3. // Will be used to calculate the partsize of the object to be uploaded. // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file - // as 100, 50MB parts. - // With a limited of s3.MaxUploadParts (10,000 parts). + // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). 
+ // + // MaxUploadParts must not be used to limit the total number of bytes uploaded. + // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) + // instead. An io.LimitReader is helpful when uploading an unbounded reader + // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned + // error must be used to signal end of stream. // // Defaults to package const's MaxUploadParts value. MaxUploadParts int @@ -452,7 +363,9 @@ type uploader struct { // internal logic for deciding whether to upload a single part or use a // multipart upload. func (u *uploader) upload() (*UploadOutput, error) { - u.init() + if err := u.init(); err != nil { + return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err) + } if u.cfg.PartSize < MinUploadPartSize { msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) @@ -472,7 +385,7 @@ func (u *uploader) upload() (*UploadOutput, error) { } // init will initialize all default options. -func (u *uploader) init() { +func (u *uploader) init() error { if u.cfg.Concurrency == 0 { u.cfg.Concurrency = DefaultUploadConcurrency } @@ -488,19 +401,19 @@ func (u *uploader) init() { } // Try to get the total size for some optimizations - u.initSize() + return u.initSize() } // initSize tries to detect the total stream size, setting u.totalSize. If // the size is not known, totalSize is set to -1. -func (u *uploader) initSize() { +func (u *uploader) initSize() error { u.totalSize = -1 switch r := u.in.Body.(type) { case io.Seeker: n, err := aws.SeekerLen(r) if err != nil { - return + return err } u.totalSize = n @@ -512,6 +425,8 @@ func (u *uploader) initSize() { u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 } } + + return nil } // nextReader returns a seekable reader representing the next packet of data. 
@@ -636,21 +551,6 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa // Read and queue the rest of the parts for u.geterr() == nil && err == nil { - num++ - // This upload exceeded maximum number of supported parts, error now. - if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) { - var msg string - if num > int64(u.cfg.MaxUploadParts) { - msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", - u.cfg.MaxUploadParts) - } else { - msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", - MaxUploadParts) - } - u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) - break - } - var reader io.ReadSeeker var nextChunkLen int var part []byte @@ -671,6 +571,21 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa break } + num++ + // This upload exceeded maximum number of supported parts, error now. + if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) { + var msg string + if num > int64(u.cfg.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.cfg.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) + break + } + ch <- chunk{buf: reader, part: part, num: num} } @@ -795,13 +710,11 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { // Parts must be sorted in PartNumber order. 
sort.Sort(u.parts) + params := &s3.CompleteMultipartUploadInput{} + awsutil.Copy(params, u.in) + params.UploadId = &u.uploadID + params.MultipartUpload = &s3.CompletedMultipartUpload{Parts: u.parts} - params := &s3.CompleteMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadId: &u.uploadID, - MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, - } resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) if err != nil { u.seterr(err) diff --git a/service/s3/s3manager/upload_input.go b/service/s3/s3manager/upload_input.go index 3f68d4e0..e4200fbc 100644 --- a/service/s3/s3manager/upload_input.go +++ b/service/s3/s3manager/upload_input.go @@ -75,6 +75,25 @@ type UploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Date on which it will be legal to delete or modify the object. This field + // can only be specified if Retention-Directive is REPLACE. You can only specify + // this or the Retention-Period header. If both are specified a 400 error will + // be returned. If neither is specified the bucket's DefaultRetention period + // will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"` + + // A single legal hold to apply to the object. This field can only be specified + // if Retention-Directive is REPLACE. A legal hold is a character long string + // of max length 64. The object cannot be overwritten or deleted until all legal + // holds associated with the object are removed. + RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. 
If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. + RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` @@ -83,7 +102,7 @@ type UploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption diff --git a/service/s3/s3manager/upload_test.go b/service/s3/s3manager/upload_test.go index a82efbca..f23ee32b 100644 --- a/service/s3/s3manager/upload_test.go +++ b/service/s3/s3manager/upload_test.go @@ -7,12 +7,15 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "os" "reflect" "regexp" "sort" + "strconv" "strings" "sync" "testing" + "time" "github.com/IBM/ibm-cos-sdk-go/aws" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" @@ -993,3 +996,308 @@ func TestUploadWithContextCanceled(t *testing.T) { t.Errorf("expected error message to contain %q, but did not %q", e, a) } } + +// S3 Uploader incorrectly fails an upload if the content being uploaded +// has a size of MinPartSize * MaxUploadParts. 
+// Github: aws/aws-sdk-go#2557 +func TestUploadMaxPartsEOF(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.PartSize = s3manager.DefaultUploadPartSize + u.MaxUploadParts = 2 + }) + f := bytes.NewReader(make([]byte, int(mgr.PartSize)*mgr.MaxUploadParts)) + + r1 := io.NewSectionReader(f, 0, s3manager.DefaultUploadPartSize) + r2 := io.NewSectionReader(f, s3manager.DefaultUploadPartSize, 2*s3manager.DefaultUploadPartSize) + body := io.MultiReader(r1, r2) + + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: body, + }) + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + expectOps := []string{ + "CreateMultipartUpload", + "UploadPart", + "UploadPart", + "CompleteMultipartUpload", + } + if e, a := expectOps, *ops; !reflect.DeepEqual(e, a) { + t.Errorf("expect %v ops, got %v", e, a) + } +} + +func createTempFile(t *testing.T, size int64) (*os.File, func(*testing.T), error) { + file, err := ioutil.TempFile(os.TempDir(), aws.SDKName+t.Name()) + if err != nil { + return nil, nil, err + } + filename := file.Name() + if err := file.Truncate(size); err != nil { + return nil, nil, err + } + + return file, + func(t *testing.T) { + if err := file.Close(); err != nil { + t.Errorf("failed to close temp file, %s, %v", filename, err) + } + if err := os.Remove(filename); err != nil { + t.Errorf("failed to remove temp file, %s, %v", filename, err) + } + }, + nil +} + +func buildFailHandlers(tb testing.TB, parts, retry int) []http.Handler { + handlers := make([]http.Handler, parts) + for i := 0; i < len(handlers); i++ { + handlers[i] = &failPartHandler{ + tb: tb, + failsRemaining: retry, + successHandler: successPartHandler{tb: tb}, + } + } + + return handlers +} + +func TestUploadRetry(t *testing.T) { + const numParts, retries = 3, 10 + + testFile, testFileCleanup, err := createTempFile(t, 
s3manager.DefaultUploadPartSize*numParts) + if err != nil { + t.Fatalf("failed to create test file, %v", err) + } + defer testFileCleanup(t) + + cases := map[string]struct { + Body io.Reader + PartHandlers func(testing.TB) []http.Handler + }{ + "bytes.Buffer": { + Body: bytes.NewBuffer(make([]byte, s3manager.DefaultUploadPartSize*numParts)), + PartHandlers: func(tb testing.TB) []http.Handler { + return buildFailHandlers(tb, numParts, retries) + }, + }, + "bytes.Reader": { + Body: bytes.NewReader(make([]byte, s3manager.DefaultUploadPartSize*numParts)), + PartHandlers: func(tb testing.TB) []http.Handler { + return buildFailHandlers(tb, numParts, retries) + }, + }, + "os.File": { + Body: testFile, + PartHandlers: func(tb testing.TB) []http.Handler { + return buildFailHandlers(tb, numParts, retries) + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + mux := newMockS3UploadServer(t, c.PartHandlers(t)) + server := httptest.NewServer(mux) + defer server.Close() + + sess := unit.Session.Copy(&aws.Config{ + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + Logger: t, + MaxRetries: aws.Int(retries + 1), + SleepDelay: func(time.Duration) {}, + + LogLevel: aws.LogLevel( + aws.LogDebugWithRequestErrors | aws.LogDebugWithRequestRetries, + ), + //Credentials: credentials.AnonymousCredentials, + }) + + uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) { + // u.Concurrency = 1 + }) + _, err := uploader.Upload(&s3manager.UploadInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + Body: c.Body, + }) + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + }) + } +} + +type mockS3UploadServer struct { + *http.ServeMux + + tb testing.TB + partHandler []http.Handler +} + +func newMockS3UploadServer(tb testing.TB, partHandler []http.Handler) *mockS3UploadServer { + s := &mockS3UploadServer{ + ServeMux: http.NewServeMux(), + partHandler: partHandler, + tb: tb, + } 
+ + s.HandleFunc("/", s.handleRequest) + + return s +} + +func (s mockS3UploadServer) handleRequest(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + _, hasUploads := r.URL.Query()["uploads"] + + switch { + case r.Method == "POST" && hasUploads: + // CreateMultipartUpload + w.Header().Set("Content-Length", strconv.Itoa(len(createUploadResp))) + w.Write([]byte(createUploadResp)) + + case r.Method == "PUT": + // UploadPart + partNumStr := r.URL.Query().Get("partNumber") + id, err := strconv.Atoi(partNumStr) + if err != nil { + failRequest(w, 400, "BadRequest", + fmt.Sprintf("unable to parse partNumber, %q, %v", + partNumStr, err)) + return + } + id-- + if id < 0 || id >= len(s.partHandler) { + failRequest(w, 400, "BadRequest", + fmt.Sprintf("invalid partNumber %v", id)) + return + } + s.partHandler[id].ServeHTTP(w, r) + + case r.Method == "POST": + // CompleteMultipartUpload + w.Header().Set("Content-Length", strconv.Itoa(len(completeUploadResp))) + w.Write([]byte(completeUploadResp)) + + case r.Method == "DELETE": + // AbortMultipartUpload + w.Header().Set("Content-Length", strconv.Itoa(len(abortUploadResp))) + w.WriteHeader(200) + w.Write([]byte(abortUploadResp)) + + default: + failRequest(w, 400, "BadRequest", + fmt.Sprintf("invalid request %v %v", r.Method, r.URL)) + } +} + +func failRequest(w http.ResponseWriter, status int, code, msg string) { + msg = fmt.Sprintf(baseRequestErrorResp, code, msg) + w.Header().Set("Content-Length", strconv.Itoa(len(msg))) + w.WriteHeader(status) + w.Write([]byte(msg)) +} + +type successPartHandler struct { + tb testing.TB +} + +func (h successPartHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil { + failRequest(w, 400, "BadRequest", + fmt.Sprintf("failed to read body, %v", err)) + return + } + + contLenStr := r.Header.Get("Content-Length") + expectLen, err := strconv.ParseInt(contLenStr, 10, 64) + if err != nil { + 
h.tb.Logf("expect content-length, got %q, %v", contLenStr, err) + failRequest(w, 400, "BadRequest", + fmt.Sprintf("unable to get content-length %v", err)) + return + } + if e, a := expectLen, n; e != a { + h.tb.Logf("expect %v read, got %v", e, a) + failRequest(w, 400, "BadRequest", + fmt.Sprintf( + "content-length and body do not match, %v, %v", e, a)) + return + } + + w.Header().Set("Content-Length", strconv.Itoa(len(uploadPartResp))) + w.Write([]byte(uploadPartResp)) +} + +type failPartHandler struct { + tb testing.TB + + failsRemaining int + successHandler http.Handler +} + +func (h *failPartHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if h.failsRemaining == 0 && h.successHandler != nil { + h.successHandler.ServeHTTP(w, r) + return + } + + io.Copy(ioutil.Discard, r.Body) + + failRequest(w, 500, "InternalException", + fmt.Sprintf("mock error, partNumber %v", r.URL.Query().Get("partNumber"))) + + h.failsRemaining-- +} + +const createUploadResp = ` + + bucket + key + abc123 + +` +const uploadPartResp = ` + + key + +` +const baseRequestErrorResp = ` + + %s + %s + request-id + host-id + +` +const completeUploadResp = ` + + bucket + key + key + https://bucket.us-west-2.amazonaws.com/key + abc123 + +` + +const abortUploadResp = ` + + +` diff --git a/service/s3/sse.go b/service/s3/sse.go index 53bdd48c..07c1ff6c 100644 --- a/service/s3/sse.go +++ b/service/s3/sse.go @@ -3,6 +3,7 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/request" @@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) { } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r 
*request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatiablity where user just set the header value instead + // of using the API parameter, or setting the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // In backwards compatiable, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. 
+ if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) } } diff --git a/service/s3/sse_test.go b/service/s3/sse_test.go index fccc386b..03a4ed24 100644 --- a/service/s3/sse_test.go +++ b/service/s3/sse_test.go @@ -34,9 +34,9 @@ func TestSSECustomerKeyOverHTTPError(t *testing.T) { func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ - Bucket: aws.String("bucket"), - CopySource: aws.String("bucket/source"), - Key: aws.String("dest"), + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), CopySourceSSECustomerKey: aws.String("key"), }) err := req.Build() @@ -109,3 +109,30 @@ func TestComputeSSEKeysShortcircuit(t *testing.T) { t.Errorf("expected %s, but received %s", e, a) } } + +func TestSSECustomerKeysWithSpaces(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String(" key "), + CopySourceSSECustomerKey: aws.String(" copykey "), + }) + err := req.Build() + if err != nil { + t.Errorf("expected no error, but received %v", err) + } + if e, a := "ICAga2V5ICAg", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"); e != a { + t.Errorf("expected %s, but received %s", e, a) + } + if e, a := "ICAgY29weWtleSAgIA==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"); e != a { + t.Errorf("expected %s, but received %s", e, a) + } + if e, a := "13XiUSCa6ReZ3CHtCLiJLg==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"); e != a { + t.Errorf("expected %s, but received %s", e, a) + } + if e, a := "MHVtfmuml539o1871Vsc6w==", 
req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"); e != a { + t.Errorf("expected %s, but received %s", e, a) + } +} diff --git a/service/s3/statusok_error.go b/service/s3/statusok_error.go index 8d9b5ca0..772d9fa6 100644 --- a/service/s3/statusok_error.go +++ b/service/s3/statusok_error.go @@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "unable to read response body", err), + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { + if err.Code() == request.ErrCodeSerialization { r.Error = nil return } diff --git a/service/s3/unmarshal_error.go b/service/s3/unmarshal_error.go index 0dee6731..1f6c16a8 100644 --- a/service/s3/unmarshal_error.go +++ b/service/s3/unmarshal_error.go @@ -11,6 +11,7 @@ import ( "github.com/IBM/ibm-cos-sdk-go/aws" "github.com/IBM/ibm-cos-sdk-go/aws/awserr" "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct { @@ -42,29 +43,34 @@ func unmarshalError(r *request.Request) { return } - var errCode, errMsg string - // Attempt to parse error from body if it is known - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - errCode = "SerializationError" - errMsg = "failed to decode S3 XML error response" - } else { - errCode = resp.Code - errMsg = resp.Message + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + if err == io.EOF { + // Only capture the error if an unmarshal error occurs that 
is not EOF, + // because S3 might send an error without a error message which causes + // the XML unmarshal to fail with EOF. err = nil } + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { + if len(errResp.Code) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), + awserr.New(errResp.Code, errResp.Message, err), r.HTTPResponse.StatusCode, r.RequestID, )