From c2c79e711d9f5db01ecfc823c6a2017d27610717 Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Mon, 19 Sep 2022 16:07:04 +0200 Subject: [PATCH 1/5] Add v4 unsigned trailing headers --- api-put-object-multipart.go | 71 +++++++++------ api-put-object-streaming.go | 7 +- api-put-object.go | 6 +- api.go | 45 ++++++++-- constants.go | 4 + pkg/signer/request-signature-streaming.go | 90 +++++++++++++++++-- .../request-signature-streaming_test.go | 41 +++++++++ pkg/signer/request-signature-v4.go | 15 ++-- utils.go | 35 ++++++++ 9 files changed, 259 insertions(+), 55 deletions(-) diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go index 5fec17a1c..e4c958882 100644 --- a/api-put-object-multipart.go +++ b/api-put-object-multipart.go @@ -159,8 +159,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj crcBytes = append(crcBytes, cSum...) } + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -269,57 +270,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object return initiateMultipartUploadResult, nil } +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + // uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName string, uploadID string, reader io.Reader, partNumber int, md5Base64 string, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, customHeader http.Header) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } - if partNumber <= 0 { + if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } - if uploadID == "" { + if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. 
- urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. - urlValues.Set("uploadId", uploadID) + urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. - if customHeader == nil { - customHeader = make(http.Header) + if p.customHeader == nil { + p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. - if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, + bucketName: p.bucketName, + objectName: p.objectName, queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: streamSha256, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, } // Execute PUT on each part. @@ -330,7 +347,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. @@ -341,8 +358,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } - objPart.Size = size - objPart.PartNumber = partNumber + objPart.Size = p.size + objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil diff --git a/api-put-object-streaming.go b/api-put-object-streaming.go index 464bde7f3..4ca6ae61d 100644 --- a/api-put-object-streaming.go +++ b/api-put-object-streaming.go @@ -184,7 +184,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) // Proceed to upload the part. - objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, nil) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: sectionReader, partNumber: uploadReq.PartNum, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256} + objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ Error: err, @@ -339,8 +340,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Update progress reader appropriately to the latest offset // as we read from the source. 
hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, hooked, partNumber, md5Base64, "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } diff --git a/api-put-object.go b/api-put-object.go index 321ad00aa..0fd0e5000 100644 --- a/api-put-object.go +++ b/api-put-object.go @@ -269,6 +269,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str } if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } @@ -366,7 +369,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, "", int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } diff --git a/api.go b/api.go index d051781b9..16e8e7584 100644 --- a/api.go +++ b/api.go @@ -20,8 +20,10 @@ package minio import ( "bytes" "context" + "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "math/rand" @@ -117,7 +119,7 @@ const ( // User Agent should always following the below style. // Please open an issue to discuss any new changes here. // -// MinIO (OS; ARCH) LIB/VER APP/VER +// MinIO (OS; ARCH) LIB/VER APP/VER const ( libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion @@ -312,9 +314,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) hashAlgos = make(map[string]md5simd.Hasher) @@ -419,6 +421,8 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool + addCrc bool + trailer http.Header // (http.Request).Trailer. Requires v4 signature. } // dumpHTTP - dump HTTP request and response. 
@@ -581,6 +585,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } + if metadata.addCrc { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set("x-amz-checksum-crc32c", "") + } // Instantiate a new request. var req *http.Request req, err = c.newRequest(ctx, method, metadata) @@ -632,7 +647,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // code dictates invalid region, we can retry the request // with the new region. // - // Additionally we should only retry if bucketLocation and custom + // Additionally, we should only retry if bucketLocation and custom // region is empty. if c.region == "" { switch errResponse.Code { @@ -814,14 +829,28 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Add signature version '2' authorization header. req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.streamSha256 && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload + if len(metadata.trailer) > 0 && metadata.contentSHA256Hex == "" { + // Set as Go trailer. + // This requires transfer to be chunked. + req.Trailer = metadata.trailer + req.TransferEncoding = []string{"chunked"} + // Add trailer keys to amz header. + for k := range metadata.trailer { + req.Header.Add("X-Amz-Trailer", k) + } + shaHeader = unsignedPayloadTrailer + } if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex } diff --git a/constants.go b/constants.go index dee83b870..5c2bab310 100644 --- a/constants.go +++ b/constants.go @@ -46,6 +46,10 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + // Total number of parallel workers used for multipart operation. 
const totalWorkers = 4 diff --git a/pkg/signer/request-signature-streaming.go b/pkg/signer/request-signature-streaming.go index b1296d2b1..094a04e32 100644 --- a/pkg/signer/request-signature-streaming.go +++ b/pkg/signer/request-signature-streaming.go @@ -32,13 +32,15 @@ import ( // Reference for constants used below - // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF ) // Request headers to be ignored while calculating seed signature for @@ -91,11 +93,33 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ return strings.Join(stringToSignParts, "\n") } +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", k) + } + } + if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } @@ -122,6 +146,17 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region, return getSignature(signingKey, chunkStringToSign) } +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkData) + fmt.Println("chunkStringToSign:", chunkStringToSign) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + // getSeedSignature - returns the seed signature for a given request. 
func (s *StreamingReader) setSeedSignature(req *http.Request) { // Get canonical request @@ -156,6 +191,7 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int + trailer *http.Header } // signChunk - signs a chunk read from s.baseReader of chunkLen size. @@ -182,6 +218,33 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.chunkNum++ } +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + if len(v) > 0 { + s.chunkBuf = append(s.chunkBuf, []byte(k+":"+v[0]+"\n")...) + } + } + + // Compute chunk signature + signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + fmt.Fprintf(&s.buf, "x-amz-trailer-signature:%s\r\n", signature) + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { @@ -222,6 +285,11 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), } + if len(req.Trailer) > 0 { + stReader.trailer = &req.Trailer + // Remove from request. + req.Trailer = nil + } // Add the request headers required for chunk upload signing. @@ -290,6 +358,12 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { // Sign the chunk and write it to s.buf. s.signChunk(0) + if s.trailer != nil { + // Trailer must be set now. + s.addSignedTrailer(*s.trailer) + // Remove... 
+ *s.trailer = http.Header{} + } break } return 0, err diff --git a/pkg/signer/request-signature-streaming_test.go b/pkg/signer/request-signature-streaming_test.go index 43b614ee8..fa946ac73 100644 --- a/pkg/signer/request-signature-streaming_test.go +++ b/pkg/signer/request-signature-streaming_test.go @@ -63,6 +63,20 @@ func TestChunkSignature(t *testing.T) { } } +// Example on https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html +func TestTrailerChunkSignature(t *testing.T) { + chunkData := []byte("x-amz-checksum-crc32c:wdBDMA==\n") + reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z") + previousSignature := "e05ab64fe1dfdbf0b5870abbaabdb063c371d4e96f2767e6934d90529c5ae850" + location := "us-east-1" + secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + expectedSignature := "41e14ac611e27a8bb3d66c3bad6856f209297767d5dd4fc87d8fa9e422e03faf" + actualSignature := buildTrailerChunkSignature(chunkData, reqTime, location, previousSignature, secretAccessKeyID) + if actualSignature != expectedSignature { + t.Errorf("Expected %s but received %s", expectedSignature, actualSignature) + } +} + func TestSetStreamingAuthorization(t *testing.T) { location := "us-east-1" secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" @@ -85,6 +99,33 @@ func TestSetStreamingAuthorization(t *testing.T) { } } +// Test against https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html +func TestSetStreamingAuthorizationTrailer(t *testing.T) { + location := "us-east-1" + secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + accessKeyID := "AKIAIOSFODNN7EXAMPLE" + + req := NewRequest(http.MethodPut, "/examplebucket/chunkObject.txt", nil) + req.Header.Set("Content-Encoding", "aws-chunked") + req.Header.Set("x-amz-decoded-content-length", "66560") + req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + req.Host = "" + req.URL.Host = "s3.amazonaws.com" + req.Trailer = http.Header{"x-amz-checksum-crc32c": nil} + + dataLen := int64(65 * 1024) + reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z") + req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime) + + // (order of signed headers is different) + expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class;x-amz-trailer,Signature=106e2a8a18243abcf37539882f36619c00e2dfc72633413f02d3b74544bfeb8e" + + actualAuthorization := req.Header.Get("Authorization") + if actualAuthorization != expectedAuthorization { + t.Errorf("Expected \n%s but received \n%s", expectedAuthorization, actualAuthorization) + } +} + func TestStreamingReader(t *testing.T) { reqTime, _ := time.Parse("20060102T150405Z", "20130524T000000Z") location := "us-east-1" diff --git a/pkg/signer/request-signature-v4.go b/pkg/signer/request-signature-v4.go index a12608ebb..655d1dbed 100644 --- a/pkg/signer/request-signature-v4.go +++ b/pkg/signer/request-signature-v4.go @@ -42,7 +42,6 @@ const ( ServiceTypeSTS = "sts" ) -// // Excerpts from @lsegal - // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. // @@ -57,7 +56,6 @@ const ( // * Accept-Encoding // Some S3 servers like Hitachi Content Platform do not honor this header for signature // calculation. 
-// var v4IgnoredHeaders = map[string]bool{ "Accept-Encoding": true, "Authorization": true, @@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // getCanonicalRequest generate a canonical request of style. // // canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// +// +// \n +// \n +// \n +// \n +// \n +// func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ diff --git a/utils.go b/utils.go index f32f84ab0..7e1789a51 100644 --- a/utils.go +++ b/utils.go @@ -627,3 +627,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { } return false } + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. +func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} From 59c231fa768b04d4d1601ca7fff88f90a2b55ba7 Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Tue, 11 Oct 2022 16:37:57 +0200 Subject: [PATCH 2/5] Fix signed and unsigned trailing headers. --- api-put-object-streaming.go | 50 +++- api-s3-datatypes.go | 8 +- api.go | 30 ++- core.go | 15 +- ...st-signature-streaming-unsigned-trailer.go | 231 ++++++++++++++++++ pkg/signer/request-signature-streaming.go | 47 ++-- .../request-signature-streaming_test.go | 6 +- pkg/signer/request-signature-v4.go | 26 +- 8 files changed, 367 insertions(+), 46 deletions(-) create mode 100644 pkg/signer/request-signature-streaming-unsigned-trailer.go diff --git a/api-put-object-streaming.go b/api-put-object-streaming.go index 4ca6ae61d..471b1b5e4 100644 --- a/api-put-object-streaming.go +++ b/api-put-object-streaming.go @@ -107,11 +107,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN return UploadInfo{}, err } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -177,14 +185,32 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // As a special case if partNumber is lastPartNumber, we // calculate the offset based on the last part size. 
if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) + readOffset = size - lastPartSize partSize = lastPartSize } sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + var trailer = make(http.Header, 1) + if withChecksum { + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + } // Proceed to upload the part. - p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: sectionReader, partNumber: uploadReq.PartNum, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256} + p := uploadPartParams{bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ @@ -222,8 +248,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, }) } } @@ -236,6 +266,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + if withChecksum { + // Add hash of hashes. + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for _, part := range complMultipartUpload.Parts { + cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + if err == nil { + crc.Write(cs) + } + } + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err diff --git a/api-s3-datatypes.go b/api-s3-datatypes.go index b1b848f4a..f0ba0d94b 100644 --- a/api-s3-datatypes.go +++ b/api-s3-datatypes.go @@ -323,10 +323,10 @@ type CompletePart struct { ETag string // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` } // completeMultipartUpload container for completing multipart upload. 
diff --git a/api.go b/api.go index 0df181d42..536849d29 100644 --- a/api.go +++ b/api.go @@ -95,6 +95,8 @@ type Client struct { sha256Hasher func() md5simd.Hasher healthStatus int32 + + trailingHeaderSupport bool } // Options for New method @@ -105,6 +107,9 @@ type Options struct { Region string BucketLookup BucketLookupType + // TrailingHeaders indicates server support of trailing headers. + TrailingHeaders bool + // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher @@ -248,6 +253,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { if clnt.sha256Hasher == nil { clnt.sha256Hasher = newSHA256Hasher } + + clnt.trailingHeaderSupport = opts.TrailingHeaders + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. clnt.lookup = opts.BucketLookup @@ -594,7 +602,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // Update trailer when done. metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) }) - metadata.trailer.Set("x-amz-checksum-crc32c", "") + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) } // Instantiate a new request. var req *http.Request @@ -607,6 +615,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } + // Initiate the request. res, err = c.do(req) if err != nil { @@ -840,24 +849,19 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload - if len(metadata.trailer) > 0 && metadata.contentSHA256Hex == "" { - // Set as Go trailer. - // This requires transfer to be chunked. - req.Trailer = metadata.trailer - req.TransferEncoding = []string{"chunked"} - // Add trailer keys to amz header. - for k := range metadata.trailer { - req.Header.Add("X-Amz-Trailer", k) - } - shaHeader = unsignedPayloadTrailer - } if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer } req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) } // Return request. diff --git a/core.go b/core.go index f6132e79a..207a38702 100644 --- a/core.go +++ b/core.go @@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke // PutObjectPart - Upload an object part. 
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - streamSha256 := true - return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256, nil) + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: md5Base64, + sha256Hex: sha256Hex, + size: size, + sse: sse, + streamSha256: true, + } + return c.uploadPart(ctx, p) } // ListObjectParts - List uploaded parts of an incomplete upload.x diff --git a/pkg/signer/request-signature-streaming-unsigned-trailer.go b/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 000000000..64de1ca29 --- /dev/null +++ b/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,231 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// buildChunkHeader - returns the chunk header. 
+// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildUSChunkHeader(chunkLen int64) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";\r\n") +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. + prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. 
n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/pkg/signer/request-signature-streaming.go b/pkg/signer/request-signature-streaming.go index 094a04e32..49e999b01 100644 --- a/pkg/signer/request-signature-streaming.go +++ b/pkg/signer/request-signature-streaming.go @@ -41,6 +41,8 @@ const ( chunkSigConstLen = 17 // ";chunk-signature=" signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" ) // Request headers to be ignored while calculating seed signature for @@ -62,7 +64,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 { } // getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { if dataLen <= 0 { return 0 } @@ -75,6 +77,15 @@ func getStreamLength(dataLen, chunkSize int64) int64 { streamLen += getSignedChunkLength(remainingBytes) } streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + return streamLen } @@ -116,8 +127,9 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int } else { req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) for k := range req.Trailer { - req.Header.Add("X-Amz-Trailer", k) + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) } + req.TransferEncoding = []string{"aws-chunked"} } if sessionToken != "" { @@ -126,7 +138,7 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. 
- req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) } @@ -152,7 +164,6 @@ func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, ) string { chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, previousSignature, chunkData) - fmt.Println("chunkStringToSign:", chunkStringToSign) signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) return getSignature(signingKey, chunkStringToSign) } @@ -191,11 +202,11 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int - trailer *http.Header + trailer http.Header } // signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, s.region, s.prevSignature, s.secretAccessKey) @@ -211,7 +222,9 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.buf.Write(s.chunkBuf[:chunkLen]) // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) + if addCrLf { + s.buf.Write([]byte("\r\n")) + } // Reset chunkBufLen for next chunk read. s.chunkBufLen = 0 @@ -224,9 +237,7 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) { olen := len(s.chunkBuf) s.chunkBuf = s.chunkBuf[:0] for k, v := range h { - if len(v) > 0 { - s.chunkBuf = append(s.chunkBuf, []byte(k+":"+v[0]+"\n")...) - } + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) } // Compute chunk signature @@ -237,7 +248,7 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) { s.prevSignature = signature s.buf.Write(s.chunkBuf) - fmt.Fprintf(&s.buf, "x-amz-trailer-signature:%s\r\n", signature) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") // Reset chunkBufLen for next chunk read. s.chunkBuf = s.chunkBuf[:olen] @@ -286,8 +297,8 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok lastChunkSize: int(dataLen % payloadChunkSize), } if len(req.Trailer) > 0 { - stReader.trailer = &req.Trailer - // Remove from request. + stReader.trailer = req.Trailer + // Remove... req.Trailer = nil } @@ -340,7 +351,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { (s.chunkNum == s.totalChunks-1 && s.chunkBufLen == s.lastChunkSize) { // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) + s.signChunk(s.chunkBufLen, true) break } } @@ -357,12 +368,10 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } // Sign the chunk and write it to s.buf. - s.signChunk(0) - if s.trailer != nil { + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { // Trailer must be set now. - s.addSignedTrailer(*s.trailer) - // Remove... 
- *s.trailer = http.Header{} + s.addSignedTrailer(s.trailer) } break } diff --git a/pkg/signer/request-signature-streaming_test.go b/pkg/signer/request-signature-streaming_test.go index fa946ac73..29c73725d 100644 --- a/pkg/signer/request-signature-streaming_test.go +++ b/pkg/signer/request-signature-streaming_test.go @@ -19,6 +19,7 @@ package signer import ( "bytes" + "encoding/hex" "io/ioutil" "net/http" "testing" @@ -111,7 +112,8 @@ func TestSetStreamingAuthorizationTrailer(t *testing.T) { req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") req.Host = "" req.URL.Host = "s3.amazonaws.com" - req.Trailer = http.Header{"x-amz-checksum-crc32c": nil} + req.Trailer = http.Header{} + req.Trailer.Set("x-amz-checksum-crc32c", "wdBDMA==") dataLen := int64(65 * 1024) reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z") @@ -124,6 +126,8 @@ func TestSetStreamingAuthorizationTrailer(t *testing.T) { if actualAuthorization != expectedAuthorization { t.Errorf("Expected \n%s but received \n%s", expectedAuthorization, actualAuthorization) } + chunkData := []byte("x-amz-checksum-crc32c:wdBDMA==\n") + t.Log(hex.EncodeToString(sum256(chunkData))) } func TestStreamingReader(t *testing.T) { diff --git a/pkg/signer/request-signature-v4.go b/pkg/signer/request-signature-v4.go index 655d1dbed..34914490c 100644 --- a/pkg/signer/request-signature-v4.go +++ b/pkg/signer/request-signature-v4.go @@ -263,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4STS - signature v4 for STS request. func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) } // Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -284,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.TransferEncoding = []string{"aws-chunked"} + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + hashedPayload := getHashedPayload(req) if serviceType == ServiceTypeSTS { // Content sha256 header is not sent with the request @@ -321,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati auth := strings.Join(parts, ", ") req.Header.Set("Authorization", auth) + if len(trailer) > 0 { + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) + } return &req } // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. 
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) } From 564ba818ad776e99b430ae9a21d66956ddc5d728 Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Tue, 11 Oct 2022 16:47:49 +0200 Subject: [PATCH 3/5] CI happiness --- pkg/signer/request-signature-streaming-unsigned-trailer.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pkg/signer/request-signature-streaming-unsigned-trailer.go b/pkg/signer/request-signature-streaming-unsigned-trailer.go index 64de1ca29..5838b9de9 100644 --- a/pkg/signer/request-signature-streaming-unsigned-trailer.go +++ b/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -75,12 +75,6 @@ func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen i req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) } -// buildChunkHeader - returns the chunk header. -// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n -func buildUSChunkHeader(chunkLen int64) []byte { - return []byte(strconv.FormatInt(chunkLen, 16) + ";\r\n") -} - // StreamingUSReader implements chunked upload signature as a reader on // top of req.Body's ReaderCloser chunk header;data;... repeat type StreamingUSReader struct { From 0835557848ee370454ccf75e40c516266338a65d Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Tue, 11 Oct 2022 16:54:00 +0200 Subject: [PATCH 4/5] Only enable when v4. --- api.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api.go b/api.go index ac68a9398..abb5241c8 100644 --- a/api.go +++ b/api.go @@ -108,6 +108,7 @@ type Options struct { BucketLookup BucketLookupType // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. TrailingHeaders bool // Custom hash routines. Leave nil to use standard. @@ -254,7 +255,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { clnt.sha256Hasher = newSHA256Hasher } - clnt.trailingHeaderSupport = opts.TrailingHeaders + clnt.trailingHeaderSupport = opts.TrailingHeaders && !clnt.overrideSignerType.IsV4() // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. 
From 6c7eee12e2b9e10d521ab088ba15585b7c4af51d Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Mon, 17 Oct 2022 09:54:58 +0200 Subject: [PATCH 5/5] Update api.go Co-authored-by: Harshavardhana --- api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api.go b/api.go index abb5241c8..21ba3982b 100644 --- a/api.go +++ b/api.go @@ -255,7 +255,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { clnt.sha256Hasher = newSHA256Hasher } - clnt.trailingHeaderSupport = opts.TrailingHeaders && !clnt.overrideSignerType.IsV4() + clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
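A minimal caller-side sketch (not part of the patches themselves) of how the Options.TrailingHeaders flag introduced in this series might be enabled; the endpoint and credentials below are placeholders, and the comments reflect the behavior after PATCH 5/5:

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and keys; substitute real values.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
		// Opt in to trailing-header support added by this series.
		// Per the final patch, this is only honored for v4 signatures.
		TrailingHeaders: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// Uploads through this client (e.g. PutObject) can now send
	// x-amz-checksum-crc32c as a trailing header where the signer
	// and transport support it.
	_ = client
}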