From add6a284ea0f844fd6628cba637be5451fe4b28a Mon Sep 17 00:00:00 2001 From: Jacob Hoffman-Andrews Date: Thu, 7 Mar 2024 13:35:47 -0800 Subject: [PATCH] v3: backport decompression limit fix (#107) Backport from #106. Note that there was a merge conflict on the CHANGELOG and I decided to just accept all the v4 entries. It's okay for the v3 branch's CHANGELOG to also talk about v4 releases. I'll send a separate PR updating the v4 CHANGELOG to include v3.0.3. --- CHANGELOG.md | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ crypter.go | 6 ++++++ encoding.go | 21 +++++++++++++++---- encoding_test.go | 34 +++++++++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ae6cff..ce2a54e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,56 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. + +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. + - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. + # v3.0.2 ## Fixed diff --git a/crypter.go b/crypter.go index 506d3b7..8870e89 100644 --- a/crypter.go +++ b/crypter.go @@ -440,6 +440,9 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { // // Note that ed25519 is only available for signatures, not encryption, so is // not an option here. 
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
 func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
 	headers := obj.mergedHeaders(nil)
 
@@ -511,6 +514,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
 //
 // The decryptionKey argument must have one of the types allowed for the
 // decryptionKey argument of Decrypt().
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
 func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
 	globalHeaders := obj.mergedHeaders(nil)
 
diff --git a/encoding.go b/encoding.go
index 62f8b8a..9f07cfd 100644
--- a/encoding.go
+++ b/encoding.go
@@ -21,6 +21,7 @@ import (
 	"compress/flate"
 	"encoding/base64"
 	"encoding/binary"
+	"fmt"
 	"io"
 	"math/big"
 	"strings"
@@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
 	}
 }
 
-// Compress with DEFLATE
+// deflate compresses the input.
 func deflate(input []byte) ([]byte, error) {
 	output := new(bytes.Buffer)
 
@@ -97,15 +98,27 @@ func deflate(input []byte) ([]byte, error) {
 	return output.Bytes(), err
 }
 
-// Decompress with DEFLATE
+// inflate decompresses the input.
+//
+// Errors if the decompressed data would be >250kB or >10x the size of the
+// compressed data, whichever is larger.
 func inflate(input []byte) ([]byte, error) {
 	output := new(bytes.Buffer)
 	reader := flate.NewReader(bytes.NewBuffer(input))
 
-	_, err := io.Copy(output, reader)
-	if err != nil {
+	maxCompressedSize := 10 * int64(len(input))
+	if maxCompressedSize < 250000 {
+		maxCompressedSize = 250000
+	}
+
+	limit := maxCompressedSize + 1
+	n, err := io.CopyN(output, reader, limit)
+	if err != nil && err != io.EOF {
 		return nil, err
 	}
+	if n == limit {
+		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize)
+	}
 
 	err = reader.Close()
 	return output.Bytes(), err
diff --git a/encoding_test.go b/encoding_test.go
index fc48685..1061cdd 100644
--- a/encoding_test.go
+++ b/encoding_test.go
@@ -18,6 +18,8 @@ package jose
 
 import (
 	"bytes"
+	"crypto/rand"
+	"io"
 	"strings"
 	"testing"
 )
@@ -57,6 +59,38 @@ func TestInvalidCompression(t *testing.T) {
 	}
 }
 
+// TestLargeZip tests that we can decompress a large input, so long as its
+// compression ratio is reasonable.
+func TestLargeZip(t *testing.T) {
+	input := new(bytes.Buffer)
+	_, err := io.CopyN(input, rand.Reader, 251000)
+	if err != nil {
+		t.Fatalf("generating input: %s", err)
+	}
+	compressed, err := compress(DEFLATE, input.Bytes())
+	if err != nil {
+		t.Errorf("compressing: %s", err)
+	}
+	t.Logf("compression ratio: %g", float64(len(input.Bytes()))/float64(len(compressed)))
+	_, err = decompress(DEFLATE, compressed)
+	if err != nil {
+		t.Errorf("decompressing large input with low compression ratio: %s", err)
+	}
+}
+
+func TestZipBomb(t *testing.T) {
+	input := strings.Repeat("a", 251000)
+	compressed, err := compress(DEFLATE, []byte(input))
+	if err != nil {
+		t.Errorf("compressing: %s", err)
+	}
+	t.Logf("compression ratio: %d %g", len(compressed), float64(len(input))/float64(len(compressed)))
+	out, err := decompress(DEFLATE, compressed)
+	if err == nil {
+		t.Errorf("expected error decompressing zip bomb, got none. output size %d", len(out))
+	}
+}
+
 func TestByteBufferTrim(t *testing.T) {
 	buf := newBufferFromInt(1)
 	if !bytes.Equal(buf.data, []byte{1}) {
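
Reviewer note (illustrative only, not part of the patch): the sketch below restates the bounded-decompression technique that the new inflate uses, as a self-contained Go program. The function name inflateBounded and the main driver are hypothetical and do not exist in go-jose; they are only here to make the limit arithmetic (the larger of 250kB and 10x the compressed size, enforced via io.CopyN with limit+1) easy to try in isolation.

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"strings"
)

// inflateBounded decompresses DEFLATE data, but refuses to produce more than
// max(250000, 10*len(input)) bytes of output, mirroring the limit this patch
// introduces in inflate. (Hypothetical helper, for illustration only.)
func inflateBounded(input []byte) ([]byte, error) {
	output := new(bytes.Buffer)
	reader := flate.NewReader(bytes.NewReader(input))
	defer reader.Close()

	maxDecompressedSize := 10 * int64(len(input))
	if maxDecompressedSize < 250000 {
		maxDecompressedSize = 250000
	}

	// Copy at most one byte past the allowed maximum. Filling the full limit
	// means the stream would exceed the maximum, so we fail instead of
	// buffering an attacker-controlled amount of data.
	limit := maxDecompressedSize + 1
	n, err := io.CopyN(output, reader, limit)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n == limit {
		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxDecompressedSize)
	}
	return output.Bytes(), nil
}

func main() {
	// A miniature zip bomb: 1 MiB of "a" compresses to roughly a kilobyte,
	// so decompressing it would blow well past the 250kB / 10x limit.
	var compressed bytes.Buffer
	writer, err := flate.NewWriter(&compressed, flate.DefaultCompression)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(writer, strings.NewReader(strings.Repeat("a", 1<<20))); err != nil {
		panic(err)
	}
	writer.Close()

	_, err = inflateBounded(compressed.Bytes())
	fmt.Println(err) // uncompressed data would be too large (>250000 bytes)
}

The limit is maxDecompressedSize+1 rather than maxDecompressedSize so that output of exactly the maximum size still succeeds; only when CopyN fills the full limit do we know the stream had more data than allowed.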