diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 317a30a..ffc6101 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,10 +12,10 @@ jobs:
steps:
- name: set up go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache-dependency-path: backend/
- go-version: "1.23"
+ go-version: "1.25"
id: go
- name: launch mongodb
@@ -24,7 +24,7 @@ jobs:
mongoDBVersion: "5.0"
- name: checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: build and test
working-directory: backend
@@ -36,7 +36,7 @@ jobs:
ENABLE_MONGO_TESTS: "true"
- name: golangci-lint
- uses: golangci/golangci-lint-action@v7
+ uses: golangci/golangci-lint-action@v8
with:
version: latest
working-directory: backend
diff --git a/backend/extractor/pics.go b/backend/extractor/pics.go
index f066442..4d6720c 100644
--- a/backend/extractor/pics.go
+++ b/backend/extractor/pics.go
@@ -23,12 +23,10 @@ func (f *UReadability) extractPics(iselect *goquery.Selection, url string) (main
iselect.Each(func(_ int, s *goquery.Selection) {
if im, ok := s.Attr("src"); ok {
- wg.Add(1)
- go func(url string) {
- size := f.getImageSize(url)
- resCh <- imgInfo{url: url, size: size}
- wg.Done()
- }(im)
+ wg.Go(func() {
+ size := f.getImageSize(im)
+ resCh <- imgInfo{url: im, size: size}
+ })
}
})
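
This swaps the manual `wg.Add(1)` / `go func(url string){...}(im)` / `wg.Done()` dance for `sync.WaitGroup.Go`, added in Go 1.25; capturing `im` directly is safe because loop variables have been per-iteration since Go 1.22. A minimal sketch of the pattern, with a hypothetical `fetchSize` standing in for `getImageSize` and placeholder URLs:

```go
// Minimal sketch of the WaitGroup.Go pattern (requires Go 1.25+).
package main

import (
	"fmt"
	"sync"
)

// fetchSize is a hypothetical stand-in for the real size lookup.
func fetchSize(url string) int { return len(url) }

func main() {
	urls := []string{"https://a.example/1.png", "https://a.example/2.png"}
	resCh := make(chan int, len(urls))

	var wg sync.WaitGroup
	for _, u := range urls {
		// wg.Go calls wg.Add(1), runs the function in a goroutine,
		// and defers wg.Done(), so there is no bookkeeping to forget.
		wg.Go(func() {
			resCh <- fetchSize(u)
		})
	}
	wg.Wait()
	close(resCh)
	for s := range resCh {
		fmt.Println(s)
	}
}
```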
diff --git a/backend/extractor/readability.go b/backend/extractor/readability.go
index 63e1d01..a8b55b5 100644
--- a/backend/extractor/readability.go
+++ b/backend/extractor/readability.go
@@ -187,7 +187,7 @@ func (f *UReadability) getContent(ctx context.Context, body, reqURL string, rule
log.Printf("[WARN] custom extractor failed for %s, error=%v", reqURL, err) // back to general parser
}
} else {
- log.Printf("[DEBUG] no rules defined!")
+ log.Print("[DEBUG] no rules defined!")
}
return genParser(body, reqURL)
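
`log.Printf` with a constant message and no arguments adds nothing over `log.Print` and is the kind of call printf-style vet checks tend to flag; this change (and the identical one in `main.go` below) picks the variant that matches the call. A small illustration of when each applies:

```go
package main

import "log"

func main() {
	n := 0
	if n > 0 {
		log.Printf("[DEBUG] parsed %d rules", n) // formatting verbs, so Printf
	} else {
		log.Print("[DEBUG] no rules defined!") // constant message, Print suffices
	}
}
```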
diff --git a/backend/go.mod b/backend/go.mod
index 0d1d356..6c32e1b 100644
--- a/backend/go.mod
+++ b/backend/go.mod
@@ -1,8 +1,6 @@
module github.com/ukeeper/ukeeper-readability/backend
-go 1.24.0
-
-toolchain go1.24.2
+go 1.25.0
require (
github.com/PuerkitoBio/goquery v1.11.0
@@ -20,15 +18,15 @@ require (
require (
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/golang/snappy v0.0.4 // indirect
- github.com/klauspost/compress v1.17.8 // indirect
+ github.com/golang/snappy v1.0.0 // indirect
+ github.com/klauspost/compress v1.18.2 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
- github.com/xdg-go/scram v1.1.2 // indirect
+ github.com/xdg-go/scram v1.2.0 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
- golang.org/x/crypto v0.44.0 // indirect
+ golang.org/x/crypto v0.45.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.31.0 // indirect
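
Since Go 1.21 the `go` directive alone is enough to select a toolchain at least that new, so with the module now declaring `go 1.25.0` the separate `toolchain go1.24.2` line was redundant and is dropped, leaving a single source of truth for the required Go version. The indirect bumps (snappy v1.0.0, compress v1.18.2, scram v1.2.0, x/crypto v0.45.0) account for the bulk of the vendored changes below.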
diff --git a/backend/go.sum b/backend/go.sum
index 76404b2..31d04cb 100644
--- a/backend/go.sum
+++ b/backend/go.sum
@@ -67,8 +67,8 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -118,8 +118,8 @@ github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8Nz
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -192,8 +192,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/scram v1.2.0 h1:bYKF2AEwG5rqd1BumT4gAnvwU/M9nBp2pTSxeZw7Wvs=
+github.com/xdg-go/scram v1.2.0/go.mod h1:3dlrS0iBaWKYVt2ZfA4cj48umJZ+cAEbR6/SjLA88I8=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -219,8 +219,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
-golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
diff --git a/backend/main.go b/backend/main.go
index 7fe1898..8fd2f0f 100644
--- a/backend/main.go
+++ b/backend/main.go
@@ -63,7 +63,7 @@ func main() {
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
<-stop
- log.Printf("[WARN] interrupt signal")
+ log.Print("[WARN] interrupt signal")
cancel()
}()
diff --git a/backend/vendor/github.com/golang/snappy/README b/backend/vendor/github.com/golang/snappy/README
index cea1287..fd191f7 100644
--- a/backend/vendor/github.com/golang/snappy/README
+++ b/backend/vendor/github.com/golang/snappy/README
@@ -1,8 +1,13 @@
The Snappy compression format in the Go programming language.
-To download and install from source:
+To use as a library:
$ go get github.com/golang/snappy
+To use as a binary:
+$ go install github.com/golang/snappy/cmd/snappytool@latest
+$ cat decoded | ~/go/bin/snappytool -e > encoded
+$ cat encoded | ~/go/bin/snappytool -d > decoded
+
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
diff --git a/backend/vendor/github.com/golang/snappy/encode_arm64.s b/backend/vendor/github.com/golang/snappy/encode_arm64.s
index f8d54ad..f0c876a 100644
--- a/backend/vendor/github.com/golang/snappy/encode_arm64.s
+++ b/backend/vendor/github.com/golang/snappy/encode_arm64.s
@@ -27,7 +27,7 @@
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56
+TEXT ·emitLiteral(SB), NOSPLIT, $40-56
MOVD dst_base+0(FP), R8
MOVD lit_base+24(FP), R10
MOVD lit_len+32(FP), R3
@@ -261,7 +261,7 @@ extendMatchEnd:
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
-TEXT ·encodeBlock(SB), 0, $32896-56
+TEXT ·encodeBlock(SB), 0, $32904-56
MOVD dst_base+0(FP), R8
MOVD src_base+24(FP), R7
MOVD src_len+32(FP), R14
diff --git a/backend/vendor/github.com/klauspost/compress/.goreleaser.yml b/backend/vendor/github.com/klauspost/compress/.goreleaser.yml
index a229538..4528059 100644
--- a/backend/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/backend/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
before:
hooks:
- ./gen.sh
@@ -99,7 +99,7 @@ archives:
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ .Tag }}-next"
+ version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
diff --git a/backend/vendor/github.com/klauspost/compress/README.md b/backend/vendor/github.com/klauspost/compress/README.md
index 05c7359..af2ef63 100644
--- a/backend/vendor/github.com/klauspost/compress/README.md
+++ b/backend/vendor/github.com/klauspost/compress/README.md
@@ -14,8 +14,68 @@ This package provides various compression algorithms.
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
+
# changelog
+* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1)
+ * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
+ * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
+ * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
+ * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
+ * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
+ * flate: Faster load+store https://github.com/klauspost/compress/pull/1104
+ * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
+ * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103
+
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+ * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+ * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+ * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+ * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+ * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+ * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+ * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+
+ See changes to v1.17.x
+
+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
+ * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
+ * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
+ * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
+ * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
+
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+ * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+ * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+ * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+ * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+ * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+ * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+ * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+ * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+ * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+ * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+ * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+ * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+ * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -44,9 +104,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
- * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+ * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
- * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+ * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@@ -55,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
-
+
+
See changes to v1.16.x
@@ -81,7 +142,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
- * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -103,7 +164,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
See changes to v1.15.x
* Jan 21st, 2023 (v1.15.15)
- * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@@ -136,7 +197,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
- * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -146,7 +207,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
- * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+ * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
* July 13, 2022 (v1.15.8)
@@ -188,7 +249,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
- * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+ * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
@@ -215,12 +276,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
- * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
- * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
- * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
- * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
- * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+ * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
@@ -237,7 +298,7 @@ While the release has been extensively tested, it is recommended to testing when
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
- * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+ * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -339,7 +400,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +579,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
@@ -544,12 +605,14 @@ While the release has been extensively tested, it is recommended to testing when
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x of the standard library packages.
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@@ -604,84 +667,6 @@ This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# Other packages
@@ -698,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.
+
diff --git a/backend/vendor/github.com/klauspost/compress/fse/bitwriter.go b/backend/vendor/github.com/klauspost/compress/fse/bitwriter.go
index e82fa3b..d58b3fe 100644
--- a/backend/vendor/github.com/klauspost/compress/fse/bitwriter.go
+++ b/backend/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -143,7 +143,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
- for i := uint8(0); i < nbBytes; i++ {
+ for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0
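
The vendored compress update modernizes counted loops to the Go 1.22 range-over-integer form; the loop variable takes the operand's type (here `uint8`), so the emitted bytes are unchanged. A self-contained sketch of the idiom with made-up values:

```go
// Range-over-integer (Go 1.22+): with nbBytes as uint8, i is also uint8,
// giving the same semantics as the old C-style loop.
package main

import "fmt"

func main() {
	var bitContainer uint64 = 0x1122334455667788
	var nbBytes uint8 = 3

	out := make([]byte, 0, nbBytes)
	for i := range nbBytes { // i runs 0, 1, 2
		out = append(out, byte(bitContainer>>(i*8)))
	}
	fmt.Printf("% x\n", out) // 88 77 66
}
```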
diff --git a/backend/vendor/github.com/klauspost/compress/fse/compress.go b/backend/vendor/github.com/klauspost/compress/fse/compress.go
index 074018d..8c8baa4 100644
--- a/backend/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/backend/vendor/github.com/klauspost/compress/fse/compress.go
@@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error {
if v > largeLimit {
s.zeroBits = true
}
- for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+ for range v {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
diff --git a/backend/vendor/github.com/klauspost/compress/fse/decompress.go b/backend/vendor/github.com/klauspost/compress/fse/decompress.go
index cc05d0f..0c7dd4f 100644
--- a/backend/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/backend/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/bitreader.go b/backend/vendor/github.com/klauspost/compress/huff0/bitreader.go
index e36d974..bfc7a52 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
package huff0
import (
- "encoding/binary"
"errors"
"fmt"
"io"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil
}
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
}
// 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 {
return
}
- if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ if b.off >= 4 {
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return
}
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
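
Two things happen in this file: the hand-assembled four-byte little-endian loads move behind the new internal `le` helpers, and `bitReaderBytes.fill` now takes the word-sized fast path when exactly four bytes remain (`>=` rather than `>`) instead of falling through to slower tail handling. A sketch of the load equivalence, using `encoding/binary` as the safe stand-in for `le.Load32`:

```go
// The refactor replaces hand-rolled little-endian assembly of four bytes
// with a helper; binary.LittleEndian.Uint32 is the safe equivalent.
package main

import (
	"encoding/binary"
	"fmt"
)

func load32Manual(b []byte, off int) uint32 {
	v := b[off : off+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}

func load32(b []byte, off int) uint32 {
	return binary.LittleEndian.Uint32(b[off:])
}

func main() {
	buf := []byte{0x78, 0x56, 0x34, 0x12, 0xff}
	fmt.Printf("%#x %#x\n", load32Manual(buf, 0), load32(buf, 0)) // both 0x12345678
}
```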
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/backend/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index 0ebc9aa..41db94c 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -85,7 +85,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
- for i := uint8(0); i < nbBytes; i++ {
+ for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/compress.go b/backend/vendor/github.com/klauspost/compress/huff0/compress.go
index 84aa3d1..a97cf1b 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
offsetIdx := len(s.Out)
s.Out = append(s.Out, sixZeros[:]...)
- for i := 0; i < 4; i++ {
+ for i := range 4 {
toDo := src
if len(toDo) > segmentSize {
toDo = toDo[:segmentSize]
@@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
segmentSize := (len(src) + 3) / 4
var wg sync.WaitGroup
wg.Add(4)
- for i := 0; i < 4; i++ {
+ for i := range 4 {
toDo := src
if len(toDo) > segmentSize {
toDo = toDo[:segmentSize]
@@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
}(i)
}
wg.Wait()
- for i := 0; i < 4; i++ {
+ for i := range 4 {
o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/decompress.go b/backend/vendor/github.com/klauspost/compress/huff0/decompress.go
index 54bd08b..7d0efa8 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
var br [4]bitReaderBytes
start := 6
- for i := 0; i < 3; i++ {
+ for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
+ endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
@@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
var br [4]bitReaderBytes
start := 6
- for i := 0; i < 3; i++ {
+ for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
+ endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
@@ -1136,7 +1130,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
- fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1146,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
- fmt.Fprintf(w, "%d errros, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
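
The clamp of `endsAt` against `len(out)` collapses into the `min` builtin (Go 1.21). A tiny sketch with invented numbers:

```go
// The min builtin replaces the manual clamp.
package main

import "fmt"

func main() {
	out := make([]byte, 100)
	offset, remainBytes := 90, 25

	// Old shape:
	endsAt := offset + remainBytes
	if endsAt > len(out) {
		endsAt = len(out)
	}

	// New shape, identical result in one expression:
	endsAt2 := min(offset+remainBytes, len(out))

	fmt.Println(endsAt, endsAt2) // 100 100
}
```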
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/backend/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index ba7e8e6..99ddd4a 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
- for i := 0; i < 3; i++ {
+ for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
+ endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
diff --git a/backend/vendor/github.com/klauspost/compress/huff0/huff0.go b/backend/vendor/github.com/klauspost/compress/huff0/huff0.go
index 77ecd68..67d9e05 100644
--- a/backend/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/backend/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error {
for i := range hist[:16] {
hist[i] = 0
}
- for n := uint8(0); n < maxSymbolValue; n++ {
+ for n := range maxSymbolValue {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++
@@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
for i := range hist[:16] {
hist[i] = 0
}
- for n := uint8(0); n < maxSymbolValue; n++ {
+ for n := range maxSymbolValue {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++
diff --git a/backend/vendor/github.com/klauspost/compress/internal/le/le.go b/backend/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 0000000..e54909e
--- /dev/null
+++ b/backend/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+ int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
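
`Indexer` is a type-set constraint covering every integer type, so the `le` loaders can accept whatever index type a caller already tracks (`int` offsets in the bit readers, `uint8` counters elsewhere) without conversions. A minimal sketch of the same pattern with a hypothetical `at` helper:

```go
package main

import "fmt"

type Indexer interface {
	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}

// at is a toy generic accessor; any integer type works as the index.
func at[I Indexer](b []byte, i I) byte { return b[i] }

func main() {
	b := []byte{10, 20, 30}
	var off int = 1
	var small uint8 = 2
	fmt.Println(at(b, off), at(b, small)) // 20 30
}
```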
diff --git a/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 0000000..4f2a0d8
--- /dev/null
+++ b/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+ "encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64[I Indexer](b []byte, i I, v uint64) {
+ binary.LittleEndian.PutUint64(b[i:], v)
+}
diff --git a/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 0000000..218a38b
--- /dev/null
+++ b/backend/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,52 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+ "unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ //return binary.LittleEndian.Uint16(b[i:])
+ //return *(*uint16)(unsafe.Pointer(&b[i]))
+ return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ //return binary.LittleEndian.Uint16(b[i:])
+ //return *(*uint16)(unsafe.Pointer(&b[i]))
+ return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ //return binary.LittleEndian.Uint32(b[i:])
+ //return *(*uint32)(unsafe.Pointer(&b[i]))
+ return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ //return binary.LittleEndian.Uint64(b[i:])
+ //return *(*uint64)(unsafe.Pointer(&b[i]))
+ return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b[i:].
+func Store64[I Indexer](b []byte, i I, v uint64) {
+ *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v
+}
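
On the listed 64-bit little-endian targets the loaders use `unsafe.SliceData` plus `unsafe.Add` to skip bounds checks; callers must guarantee the index plus the load width stays within `len(b)`, which is exactly why the `nounsafe`/`purego` fallback file above exists. A hedged sketch of the trade-off (little-endian hosts only, matching the build constraint):

```go
// unsafe.SliceData yields the backing-array pointer, unsafe.Add offsets it,
// and the cast reads 8 bytes with no bounds check. Get i+8 <= len(b) wrong
// and this is a silent out-of-bounds read.
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

func load64Unsafe(b []byte, i int) uint64 {
	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
	// Matches the portable decode on little-endian machines.
	fmt.Println(load64Unsafe(b, 1) == binary.LittleEndian.Uint64(b[1:])) // true
}
```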
diff --git a/backend/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/backend/vendor/github.com/klauspost/compress/internal/snapref/decode.go
index 40796a4..a2c82fc 100644
--- a/backend/vendor/github.com/klauspost/compress/internal/snapref/decode.go
+++ b/backend/vendor/github.com/klauspost/compress/internal/snapref/decode.go
@@ -209,7 +209,7 @@ func (r *Reader) fill() error {
if !r.readFull(r.buf[:len(magicBody)], false) {
return r.err
}
- for i := 0; i < len(magicBody); i++ {
+ for i := range len(magicBody) {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return r.err
diff --git a/backend/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/backend/vendor/github.com/klauspost/compress/internal/snapref/encode.go
index 13c6040..860a994 100644
--- a/backend/vendor/github.com/klauspost/compress/internal/snapref/encode.go
+++ b/backend/vendor/github.com/klauspost/compress/internal/snapref/encode.go
@@ -20,8 +20,10 @@ import (
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
- } else if len(dst) < n {
+ } else if cap(dst) < n {
dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
}
// The block starts with the varint-encoded length of the decompressed bytes.
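
The old guard `len(dst) < n` reallocated even when the caller handed in a truncated buffer with plenty of capacity; checking `cap` and reslicing reuses it. A sketch of the idiom with a hypothetical `ensureLen`:

```go
// Size the destination by capacity, not length, so a dst that was
// previously truncated (len 0, large cap) is reused, not reallocated.
package main

import "fmt"

func ensureLen(dst []byte, n int) []byte {
	if cap(dst) < n {
		return make([]byte, n) // genuinely too small: allocate
	}
	return dst[:n] // room available: just extend the length
}

func main() {
	buf := make([]byte, 0, 1024) // e.g. a recycled encode buffer
	out := ensureLen(buf, 512)
	fmt.Println(len(out), cap(out)) // 512 1024; backing array reused
}
```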
diff --git a/backend/vendor/github.com/klauspost/compress/s2sx.mod b/backend/vendor/github.com/klauspost/compress/s2sx.mod
index 5a4412f..81bda5e 100644
--- a/backend/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/backend/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
module github.com/klauspost/compress
-go 1.19
-
+go 1.22
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/README.md b/backend/vendor/github.com/klauspost/compress/zstd/README.md
index 92e2347..c11d7fa 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/backend/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-This package is pure Go and without use of "unsafe".
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
The `zstd` package is provided as open source software using a Go standard license.
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/bitreader.go b/backend/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 25ca983..d41e3e1 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
package zstd
import (
- "encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -18,6 +19,7 @@ import (
type bitReader struct {
in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward.
+ cursor int // offset where next read should end
bitsRead uint8
}
@@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
+ b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
- v := b.in[len(b.in)-8:]
- b.in = b.in[:len(b.in)-8]
- b.value = binary.LittleEndian.Uint64(v)
+ b.cursor -= 8
+ b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
}
@@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
- if len(b.in) >= 4 {
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ if b.cursor >= 4 {
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
return
}
- b.bitsRead -= uint8(8 * len(b.in))
- for len(b.in) > 0 {
- b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
- b.in = b.in[:len(b.in)-1]
+ b.bitsRead -= uint8(8 * b.cursor)
+ for b.cursor > 0 {
+ b.cursor -= 1
+ b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
- return len(b.in) == 0 && b.bitsRead >= 64
+ return b.cursor == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
- return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+ return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
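
Rather than shrinking `b.in` on every refill, which rewrites the slice header each time, the reader now keeps `in` intact and walks a `cursor` down from the end; refills become one integer subtraction plus an indexed load. A sketch contrasting the two shapes with hypothetical reader types:

```go
// Slice-trimming vs cursor-based consumption from the tail of a buffer.
package main

import (
	"encoding/binary"
	"fmt"
)

type trimReader struct{ in []byte }

func (r *trimReader) next4() uint32 {
	v := r.in[len(r.in)-4:]
	r.in = r.in[:len(r.in)-4] // rewrites the slice header on every call
	return binary.LittleEndian.Uint32(v)
}

type cursorReader struct {
	in     []byte
	cursor int // offset where the next read should end
}

func (r *cursorReader) next4() uint32 {
	r.cursor -= 4 // one integer update; r.in stays stable
	return binary.LittleEndian.Uint32(r.in[r.cursor:])
}

func main() {
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	a := &trimReader{in: data}
	b := &cursorReader{in: data, cursor: len(data)}
	fmt.Println(a.next4() == b.next4(), a.next4() == b.next4()) // true true
}
```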
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/backend/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 1952f17..b22b297 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -88,7 +88,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
- for i := uint8(0); i < nbBytes; i++ {
+ for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/blockdec.go b/backend/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 03744fb..2329e99 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
package zstd
import (
- "bytes"
- "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
- "os"
- "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -58,11 +54,11 @@ const (
)
var (
- huffDecoderPool = sync.Pool{New: func() interface{} {
+ huffDecoderPool = sync.Pool{New: func() any {
return &huff0.Scratch{}
}}
- fseDecoderPool = sync.Pool{New: func() interface{} {
+ fseDecoderPool = sync.Pool{New: func() any {
return &fseDecoder{}
}}
)
@@ -557,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if compMode&3 != 0 {
return errors.New("corrupt block: reserved bits not zero")
}
- for i := uint(0); i < 3; i++ {
+ for i := range uint(3) {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
println("Table", tableIndex(i), "is", mode)
@@ -598,7 +594,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
- println("Reading table for", tableIndex(i))
+ if debugDecoder {
+ println("Reading table for", tableIndex(i))
+ }
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
@@ -646,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
- // Extract blocks...
- if false && hist.dict == nil {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
- var buf bytes.Buffer
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
- buf.Write(in)
- os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
- }
return nil
}
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/blockenc.go b/backend/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 32a7f40..fd35ea1 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@ import (
"fmt"
"math"
"math/bits"
+ "slices"
"github.com/klauspost/compress/huff0"
)
@@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0
return 0
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- cnt := maxCount(hist[:maxSym])
+ cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
}
}
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+ b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+ b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+ b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/decoder.go b/backend/vendor/github.com/klauspost/compress/zstd/decoder.go
index bbca172..30df551 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
+ frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()
@@ -372,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
- size := len(input) * 2
- // Cap to 1 MB.
- if size > 1<<20 {
- size = 1 << 20
- }
+ size := min(
+ // Cap to 1 MB.
+ len(input)*2, 1<<20)
if uint64(size) > d.o.maxDecodedSize {
size = int(d.o.maxDecodedSize)
}
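
The same modernization pass replaces manual clamp blocks with the min and max builtins from Go 1.21, here and throughout the encoder files below. A small sketch with made-up values:

```go
package main

import "fmt"

func main() {
	input := make([]byte, 600_000)
	// As in DecodeAll above: allocate len(input)*2, capped at 1 MB.
	size := min(len(input)*2, 1<<20)
	// As in the encoder hunks below: clamp the backwards-match bound at zero.
	tMin := max(100-4096, 0)
	fmt.Println(size, tMin) // 1048576 0
}
```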
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/dict.go b/backend/vendor/github.com/klauspost/compress/zstd/dict.go
index 8d5567f..2ffbfdf 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
hist := o.History
contents := o.Contents
debug := o.DebugOut != nil
- println := func(args ...interface{}) {
+ println := func(args ...any) {
if o.DebugOut != nil {
fmt.Fprintln(o.DebugOut, args...)
}
}
- printf := func(s string, args ...interface{}) {
+ printf := func(s string, args ...any) {
if o.DebugOut != nil {
fmt.Fprintf(o.DebugOut, s, args...)
}
}
- print := func(args ...interface{}) {
+ print := func(args ...any) {
if o.DebugOut != nil {
fmt.Fprint(o.DebugOut, args...)
}
@@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
@@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if offset == 0 {
continue
}
+ if int(offset) >= len(o.History) {
+ continue
+ }
if offset > 3 {
newOffsets[offset-3]++
} else {
@@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
@@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
fakeLength += v
hist[i] = uint32(v)
}
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
@@ -393,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
}
// Literal table
- avgSize := litTotal
- if avgSize > huff0.BlockSizeMax/2 {
- avgSize = huff0.BlockSizeMax / 2
- }
+ avgSize := min(litTotal, huff0.BlockSizeMax/2)
huffBuff := make([]byte, 0, avgSize)
// Target size
- div := litTotal / avgSize
- if div < 1 {
- div = 1
- }
+ div := max(litTotal/avgSize, 1)
if debug {
println("Huffman weights:")
}
@@ -423,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
huffBuff = append(huffBuff, 255)
}
scratch := &huff0.Scratch{TableLog: 11}
- for tries := 0; tries < 255; tries++ {
+ for tries := range 255 {
scratch = &huff0.Scratch{TableLog: 11}
_, _, err = huff0.Compress1X(huffBuff, scratch)
if err == nil {
@@ -440,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
// Bail out.... Just generate something
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
- for i := 0; i < 128; i++ {
+ for i := range 128 {
huffBuff = append(huffBuff, byte(i))
}
continue
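
The BuildDict hardening above (skipping blocks without sequences, bounding offsets against the history, flooring nUsed at one, and avoiding degenerate RLE histograms) mainly matters for small or skewed sample sets. A usage sketch; the option fields follow this diff and my reading of the upstream API, and the tiny corpus is illustrative only and may be rejected as too small:

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	samples := [][]byte{
		[]byte(`{"level":"info","msg":"request served"}`),
		[]byte(`{"level":"warn","msg":"slow request"}`),
		[]byte(`{"level":"info","msg":"request served","cached":true}`),
	}
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1,
		Contents: samples,
		DebugOut: os.Stderr, // enables the println/printf debug hooks patched above
	})
	if err != nil {
		fmt.Println("corpus too small for a useful dictionary:", err)
		return
	}
	fmt.Println("dictionary bytes:", len(dict))
}
```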
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/enc_base.go b/backend/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 5ca4603..c1192ec 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -8,7 +8,7 @@ import (
)
const (
- dictShardBits = 6
+ dictShardBits = 7
)
type fastBase struct {
@@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
// or a window size small enough to contain the input size, if > 0.
func (e *fastBase) WindowSize(size int64) int32 {
if size > 0 && size < int64(e.maxMatchOff) {
- b := int32(1) << uint(bits.Len(uint(size)))
- // Keep minimum window.
- if b < 1024 {
- b = 1024
- }
+ b := max(
+ // Keep minimum window.
+ int32(1)<<uint(bits.Len(uint(size))), 1024)
if b > e.maxMatchOff {
b = e.maxMatchOff
}
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/enc_best.go b/backend/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 4613724..c1581cf 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -158,11 +158,9 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// Use this to estimate literal cost.
// Scaled by 10 bits.
- bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
- // Huffman can never go < 1 bit/byte
- if bitsPerByte < 1024 {
- bitsPerByte = 1024
- }
+ bitsPerByte := max(
+ // Huffman can never go < 1 bit/byte
+ int32((compress.ShannonEntropyBits(src)*1024)/len(src)), 1024)
// Override src
src = e.hist
@@ -235,10 +233,7 @@ encodeLoop:
// Extend candidate match backwards as far as possible.
// Do not extend repeats as we can assume they are optimal
// and offsets change if s == nextEmit.
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
s--
offset--
@@ -382,10 +377,7 @@ encodeLoop:
nextEmit = s
// Index skipped...
- end := s
- if s > sLimit+4 {
- end = sLimit + 4
- }
+ end := min(s, sLimit+4)
off := index0 + e.cur
for index0 < end {
cv0 := load6432(src, index0)
@@ -444,10 +436,7 @@ encodeLoop:
nextEmit = s
// Index old s + 1 -> s - 1 or sLimit
- end := s
- if s > sLimit-4 {
- end = sLimit - 4
- }
+ end := min(s, sLimit-4)
off := index0 + e.cur
for index0 < end {
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/enc_better.go b/backend/vendor/github.com/klauspost/compress/zstd/enc_better.go
index a4f5bf9..85dcd28 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -179,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -190,10 +190,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -210,12 +207,12 @@ encodeLoop:
// Index match start+1 (long) -> s - 1
index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -241,9 +238,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -252,10 +249,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -270,11 +264,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -480,10 +474,7 @@ encodeLoop:
l := matched
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -708,9 +699,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -719,10 +710,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -738,12 +726,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -772,9 +760,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -783,10 +771,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -801,11 +786,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -1005,10 +990,7 @@ encodeLoop:
l := matched
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/backend/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index a154c18..cf8cad0 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -13,7 +13,7 @@ const (
dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
- dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
+ dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
@@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -149,10 +149,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -166,11 +163,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -266,10 +263,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -462,10 +456,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
repIndex--
start--
@@ -576,10 +567,7 @@ encodeLoop:
l := int32(matchLen(src[s+4:], src[t+4:])) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
@@ -798,9 +786,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -809,10 +797,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -826,11 +811,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -927,10 +912,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/backend/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f45a3da..9180a3a 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -143,10 +143,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
+ sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
repIndex--
start--
@@ -223,10 +220,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -387,10 +381,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
+ sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
repIndex--
start--
@@ -469,10 +460,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
@@ -655,10 +643,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
+ sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
repIndex--
start--
@@ -735,10 +720,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
+ tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/encoder.go b/backend/vendor/github.com/klauspost/compress/zstd/encoder.go
index 72af7ef..8f8223c 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,6 +6,7 @@ package zstd
import (
"crypto/rand"
+ "errors"
"fmt"
"io"
"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+ if s.eofWritten {
+ return 0, ErrEncoderClosed
+ }
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
@@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
- s.current = e.EncodeAll(s.filling, s.current[:0])
+ s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+ if final {
+ s.eofWritten = true
+ }
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
- if final {
- s.eofWritten = true
- }
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return s.err
}
return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
}
err := e.nextBlock(true)
if err != nil {
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
}
_, s.err = s.w.Write(frame)
}
+ if s.err == nil {
+ s.err = ErrEncoderClosed
+ return nil
+ }
+
return s.err
}
@@ -469,6 +489,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ e.encoders <- enc
+ }()
+ return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
- e.init.Do(e.initialize)
- enc := <-e.encoders
- defer func() {
- // Release encoder reference to last block.
- // If a non-single block is needed the encoder will reset again.
- e.encoders <- enc
- }()
+
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
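
Taken together, the encoder.go changes define a stricter lifecycle: Close marks the stream finished via ErrEncoderClosed, later Writes fail, and redundant Flush/Close calls become harmless no-ops. A usage sketch of the new semantics:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	// A Write after Close is rejected.
	_, err = enc.Write([]byte("more"))
	fmt.Println(errors.Is(err, zstd.ErrEncoderClosed)) // true
	// Flush and Close after Close are ignored and return nil.
	fmt.Println(enc.Flush(), enc.Close()) // <nil> <nil>
}
```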
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/framedec.go b/backend/vendor/github.com/klauspost/compress/zstd/framedec.go
index 53e160f..d88f067 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
- printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -236,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error {
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
- d.WindowSize = d.FrameContentSize
- if d.WindowSize < MinWindowSize {
- d.WindowSize = MinWindowSize
- }
+ d.WindowSize = max(d.FrameContentSize, MinWindowSize)
if d.WindowSize > d.o.maxDecodedSize {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/backend/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index ab26326..3a0f4e7 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error {
if v > largeLimit {
s.zeroBits = true
}
- for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+ for range v {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/backend/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 17901e0..ae7d4d3 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/backend/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -162,12 +162,12 @@ finalize:
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
+// func writeBlocks(s *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/backend/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
index 9a7655c..0782b86 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
+++ b/backend/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/backend/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
index 57b9c31..bea1779 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -7,20 +7,25 @@
package zstd
import (
- "encoding/binary"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
- for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
- diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ left := len(a)
+ for left >= 8 {
+ diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
+ left -= 8
}
+ a = a[n:]
+ b = b[n:]
for i := range a {
if a[i] != b[i] {
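
The rewritten matchLen keeps the same trick: compare eight bytes at a time and, on a mismatch, locate the first differing byte from the trailing-zero count of the XOR. A self-contained sketch of the technique using encoding/binary in place of the vendored le helpers:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// prefixLen returns the length of the common prefix of a and b;
// as with matchLen above, a must be the shorter slice.
func prefixLen(a, b []byte) (n int) {
	for len(a)-n >= 8 {
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			// Each matching byte contributes 8 trailing zero bits.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for n < len(a) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(prefixLen([]byte("compressor"), []byte("compressed"))) // 8
}
```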
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/seqdec.go b/backend/vendor/github.com/klauspost/compress/zstd/seqdec.go
index d7fe6d8..0bfb0e4 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
out := s.out
- maxBlockSize := maxCompressedBlockSize
- if s.windowSize < maxBlockSize {
- maxBlockSize = s.windowSize
- }
+ maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
@@ -245,7 +242,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 8adabd8..1f8c3ce 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
br := s.br
- maxBlockSize := maxCompressedBlockSize
- if s.windowSize < maxBlockSize {
- maxBlockSize = s.windowSize
- }
+ maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
@@ -146,7 +143,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
- return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
@@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
- maxBlockSize := maxCompressedBlockSize
- if s.windowSize < maxBlockSize {
- maxBlockSize = s.windowSize
- }
+ maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
@@ -292,7 +286,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}
- return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 5b06174..a708ca6 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -2281,8 +2281,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -2801,8 +2801,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -3465,8 +3465,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -4087,8 +4087,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index 2fb35b7..7cec219 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
for i := range seqs {
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/seqenc.go b/backend/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174..65045ea 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
- // Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
- // Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/backend/vendor/github.com/klauspost/compress/zstd/simple_go124.go
new file mode 100644
index 0000000..2efc049
--- /dev/null
+++ b/backend/vendor/github.com/klauspost/compress/zstd/simple_go124.go
@@ -0,0 +1,56 @@
+// Copyright 2025+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+//go:build go1.24
+
+package zstd
+
+import (
+ "errors"
+ "runtime"
+ "sync"
+ "weak"
+)
+
+var weakMu sync.Mutex
+var simpleEnc weak.Pointer[Encoder]
+var simpleDec weak.Pointer[Decoder]
+
+// EncodeTo appends the encoded data from src to dst.
+func EncodeTo(dst []byte, src []byte) []byte {
+ weakMu.Lock()
+ enc := simpleEnc.Value()
+ if enc == nil {
+ var err error
+ enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true))
+ if err != nil {
+ panic("failed to create simple encoder: " + err.Error())
+ }
+ simpleEnc = weak.Make(enc)
+ }
+ weakMu.Unlock()
+
+ return enc.EncodeAll(src, dst)
+}
+
+// DecodeTo appends the decoded data from src to dst.
+// The maximum decoded size is 1GiB,
+// not including what may already be in dst.
+func DecodeTo(dst []byte, src []byte) ([]byte, error) {
+ weakMu.Lock()
+ dec := simpleDec.Value()
+ if dec == nil {
+ var err error
+ dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30))
+ if err != nil {
+ weakMu.Unlock()
+ return nil, errors.New("failed to create simple decoder: " + err.Error())
+ }
+ runtime.SetFinalizer(dec, func(d *Decoder) {
+ d.Close()
+ })
+ simpleDec = weak.Make(dec)
+ }
+ weakMu.Unlock()
+ return dec.DecodeAll(src, dst)
+}
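
The new file caches one shared Encoder and Decoder behind weak pointers, so idle instances can be collected and rebuilt on demand. A round-trip sketch of the simple API, assuming the package is imported by its upstream path:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("hello, zstd simple API")
	// EncodeTo lazily creates (or revives) the shared encoder.
	compressed := zstd.EncodeTo(nil, payload)
	restored, err := zstd.DecodeTo(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(restored)) // hello, zstd simple API
}
```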
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/snappy.go b/backend/vendor/github.com/klauspost/compress/zstd/snappy.go
index ec13594..336c288 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
@@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
}
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
@@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
return written, r.err
}
- for i := 0; i < len(snappyMagicBody); i++ {
+ for i := range len(snappyMagicBody) {
if r.buf[i] != snappyMagicBody[i] {
println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
r.err = ErrSnappyCorrupt
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/zip.go b/backend/vendor/github.com/klauspost/compress/zstd/zip.go
index 29c15c8..3198d71 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -19,7 +19,7 @@ const ZipMethodWinZip = 93
const ZipMethodPKWare = 20
// zipReaderPool is the default reader pool.
-var zipReaderPool = sync.Pool{New: func() interface{} {
+var zipReaderPool = sync.Pool{New: func() any {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)
diff --git a/backend/vendor/github.com/klauspost/compress/zstd/zstd.go b/backend/vendor/github.com/klauspost/compress/zstd/zstd.go
index 4be7cc7..1a86971 100644
--- a/backend/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/backend/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,10 +5,11 @@ package zstd
import (
"bytes"
- "encoding/binary"
"errors"
"log"
"math"
+
+ "github.com/klauspost/compress/internal/le"
)
// enable debug printing
@@ -88,29 +89,33 @@ var (
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+ // ErrEncoderClosed will be returned if the Encoder was used after
+ // Close has been called.
+ ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
)
-func println(a ...interface{}) {
+func println(a ...any) {
if debug || debugDecoder || debugEncoder {
log.Println(a...)
}
}
-func printf(format string, a ...interface{}) {
+func printf(format string, a ...any) {
if debug || debugDecoder || debugEncoder {
log.Printf(format, a...)
}
}
func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
+ return le.Load32(b, i)
}
func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
+ return le.Load64(b, i)
}
type byter interface {
diff --git a/backend/vendor/github.com/xdg-go/scram/.gitignore b/backend/vendor/github.com/xdg-go/scram/.gitignore
index e69de29..90b2037 100644
--- a/backend/vendor/github.com/xdg-go/scram/.gitignore
+++ b/backend/vendor/github.com/xdg-go/scram/.gitignore
@@ -0,0 +1,34 @@
+# Local Claude code settings
+.claude/settings.local.json
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool
+*.out
+
+# Go workspace file
+go.work
+go.work.sum
+
+# Dependency directories
+vendor/
+
+# Build output
+bin/
+dist/
+
+# IDE and editor files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+.DS_Store
diff --git a/backend/vendor/github.com/xdg-go/scram/CHANGELOG.md b/backend/vendor/github.com/xdg-go/scram/CHANGELOG.md
index b833be5..fc4a1e8 100644
--- a/backend/vendor/github.com/xdg-go/scram/CHANGELOG.md
+++ b/backend/vendor/github.com/xdg-go/scram/CHANGELOG.md
@@ -1,5 +1,36 @@
# CHANGELOG
+## v1.2.0 - 2025-11-24
+
+### Added
+
+- **Channel binding support for SCRAM-PLUS variants** (RFC 5929, RFC 9266)
+- `GetStoredCredentialsWithError()` method that returns errors from PBKDF2
+ key derivation instead of panicking.
+- Support for Go 1.24+ stdlib `crypto/pbkdf2` package, which provides
+ FIPS 140-3 compliance when using SHA-256 or SHA-512 hash functions.
+
+### Changed
+
+- Minimum Go version bumped from 1.11 to 1.18.
+- Migrated from `github.com/xdg-go/pbkdf2` to stdlib `crypto/pbkdf2` on
+ Go 1.24+. Legacy Go versions (<1.24) continue using the external
+ library via build tags for backward compatibility.
+- Internal error handling improved for PBKDF2 key derivation failures.
+
+### Deprecated
+
+- `GetStoredCredentials()` is deprecated in favor of
+ `GetStoredCredentialsWithError()`. The old method panics on PBKDF2
+ errors to maintain backward compatibility but will be removed in a
+ future major version.
+
+### Notes
+
+- FIPS 140-3 compliance is available on Go 1.24+ when using SCRAM-SHA-256
+ or SCRAM-SHA-512 with appropriate salt lengths (≥16 bytes). SCRAM-SHA-1
+ is not FIPS-approved.
+
## v1.1.2 - 2022-12-07
- Bump stringprep dependency to v1.0.4 for upstream CVE fix.
diff --git a/backend/vendor/github.com/xdg-go/scram/CLAUDE.md b/backend/vendor/github.com/xdg-go/scram/CLAUDE.md
new file mode 100644
index 0000000..a19b513
--- /dev/null
+++ b/backend/vendor/github.com/xdg-go/scram/CLAUDE.md
@@ -0,0 +1,75 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+This is a Go library implementing SCRAM (Salted Challenge Response Authentication Mechanism) per RFC-5802 and RFC-7677. It provides both client and server implementations supporting SHA-1, SHA-256, and SHA-512 hash functions.
+
+## Development Commands
+
+**Run tests:**
+```bash
+go test ./...
+```
+
+**Run tests with race detection (CI configuration):**
+```bash
+go test -race ./...
+```
+
+**Build (module-only, no binaries):**
+```bash
+go build ./...
+```
+
+**Run single test:**
+```bash
+go test -run TestName ./...
+```
+
+## Architecture
+
+### Core Components
+
+**Hash factory pattern:** The `HashGeneratorFcn` type (scram.go:23) is the entry point for creating clients and servers. Package-level variables `SHA1`, `SHA256`, `SHA512` provide pre-configured hash functions. All client/server creation flows through these hash generators.
+
+**Client (`client.go`):** Holds authentication configuration for a username/password/authzID tuple. Maintains a cache of derived keys (PBKDF2 results) indexed by `KeyFactors` (salt + iteration count). Thread-safe via RWMutex. Creates `ClientConversation` instances for individual auth attempts.
+
+**Server (`server.go`):** Holds credential lookup callback and nonce generator. Creates `ServerConversation` instances for individual auth attempts.
+
+**Conversations:** State machines implementing the SCRAM protocol exchange:
+- `ClientConversation` (client_conv.go): States are `clientStarting` → `clientFirst` → `clientFinal` → `clientDone`
+- `ServerConversation` (server_conv.go): States are `serverFirst` → `serverFinal` → `serverDone`
+
+Both use a `Step(string) (string, error)` method to advance through protocol stages.
+
+**Message parsing (`parse.go`):** Parses SCRAM protocol messages into structs. Separate parsers for client-first, server-first, client-final, and server-final messages.
+
+**Shared utilities (`common.go`):**
+- `NonceGeneratorFcn`: Default uses base64-encoded 24 bytes from crypto/rand
+- `derivedKeys`: Struct caching ClientKey, StoredKey, ServerKey
+- `KeyFactors`: Salt + iteration count, used as cache key
+- `StoredCredentials`: What servers must store for each user
+- `CredentialLookup`: Callback type servers use to retrieve stored credentials
+
+### Key Design Patterns
+
+**Dependency injection:** Server requires a `CredentialLookup` callback, making storage mechanism pluggable.
+
+**Caching:** Client caches expensive PBKDF2 results in a map keyed by `KeyFactors`. This optimizes reconnection scenarios where salt/iteration count remain constant.
+
+**Factory methods:** Hash generators provide `.NewClient()` and `.NewServer()` methods that handle SASLprep normalization. Alternative `.NewClientUnprepped()` exists for custom normalization.
+
+**Configuration via chaining:** Both Client and Server support `.WithNonceGenerator()` for custom nonce generation (primarily for testing).
+
+### Security Considerations
+
+- Default minimum PBKDF2 iterations: 4096 (client.go:45)
+- All string comparisons use `hmac.Equal()` for constant-time comparison
+- SASLprep normalization applied by default via xdg-go/stringprep dependency
+- Nonce generation uses crypto/rand
+
+## Testing
+
+Tests include conversation state machine tests (client_conv_test.go, server_conv_test.go), integration tests, and examples (doc_test.go). Test data in testdata_test.go.
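
To make the Step-driven state machines described above concrete, here is a sketch of a complete in-process exchange; the credential callback and fixed salt are hypothetical stand-ins for a real user store:

```go
package main

import (
	"fmt"

	"github.com/xdg-go/scram"
)

func main() {
	client, err := scram.SHA256.NewClient("mulder", "trustno1", "")
	if err != nil {
		panic(err)
	}

	// Hypothetical provisioning: derive credentials once, serve them from lookup.
	kf := scram.KeyFactors{Salt: "0123456789abcdef", Iters: 4096}
	stored, err := client.GetStoredCredentialsWithError(kf)
	if err != nil {
		panic(err)
	}
	lookup := func(user string) (scram.StoredCredentials, error) { return stored, nil }

	server, err := scram.SHA256.NewServer(lookup)
	if err != nil {
		panic(err)
	}

	cconv, sconv := client.NewConversation(), server.NewConversation()
	clientFirst, _ := cconv.Step("")          // clientStarting -> clientFirst
	serverFirst, _ := sconv.Step(clientFirst) // serverFirst
	clientFinal, _ := cconv.Step(serverFirst) // clientFinal
	serverFinal, _ := sconv.Step(clientFinal) // serverFinal -> serverDone
	if _, err := cconv.Step(serverFinal); err != nil {
		panic(err) // the client verifies the server signature on this step
	}
	fmt.Println("authenticated:", sconv.Valid())
}
```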
diff --git a/backend/vendor/github.com/xdg-go/scram/README.md b/backend/vendor/github.com/xdg-go/scram/README.md
index 3a46f5c..8f94e33 100644
--- a/backend/vendor/github.com/xdg-go/scram/README.md
+++ b/backend/vendor/github.com/xdg-go/scram/README.md
@@ -8,12 +8,19 @@
Package scram provides client and server implementations of the Salted
Challenge Response Authentication Mechanism (SCRAM) described in
-[RFC-5802](https://tools.ietf.org/html/rfc5802) and
-[RFC-7677](https://tools.ietf.org/html/rfc7677).
+- [RFC-5802](https://tools.ietf.org/html/rfc5802)
+- [RFC-5929](https://tools.ietf.org/html/rfc5929)
+- [RFC-7677](https://tools.ietf.org/html/rfc7677)
+- [RFC-9266](https://tools.ietf.org/html/rfc9266)
It includes both client and server side support.
-Channel binding and extensions are not (yet) supported.
+Channel binding is supported for SCRAM-PLUS variants, including:
+- `tls-unique` (RFC 5929) - insecure, but required by RFC 5802
+- `tls-server-end-point` (RFC 5929) - works with all TLS versions
+- `tls-exporter` (RFC 9266) - recommended for TLS 1.3+
+
+SCRAM message extensions are not supported.
## Examples
@@ -64,6 +71,41 @@ Channel binding and extensions are not (yet) supported.
return ""
}
+### Client side with channel binding (SCRAM-PLUS)
+
+ package main
+
+ import (
+ "crypto/tls"
+ "github.com/xdg-go/scram"
+ )
+
+ func main() {
+ // Establish TLS connection
+ tlsConn, err := tls.Dial("tcp", "server:port", &tls.Config{MinVersion: tls.VersionTLS13})
+ if err != nil {
+ panic(err)
+ }
+ defer tlsConn.Close()
+
+ // Get Client with username, password
+ client, err := scram.SHA256.NewClient("mulder", "trustno1", "")
+ if err != nil {
+ panic(err)
+ }
+
+ // Create channel binding from TLS connection (TLS 1.3 example)
+ // Use NewTLSExporterBinding for TLS 1.3+, NewTLSServerEndpointBinding for all TLS versions
+ state := tlsConn.ConnectionState()
+ channelBinding, err := scram.NewTLSExporterBinding(&state)
+ if err != nil {
+ panic(err)
+ }
+
+ // Create conversation with channel binding for SCRAM-SHA-256-PLUS
+ conv := client.NewConversationWithChannelBinding(channelBinding)
+ // ... rest of authentication conversation
+ }
+
## Copyright and License
Copyright 2018 by David A. Golden. All rights reserved.
diff --git a/backend/vendor/github.com/xdg-go/scram/channel_binding.go b/backend/vendor/github.com/xdg-go/scram/channel_binding.go
new file mode 100644
index 0000000..52b1b0c
--- /dev/null
+++ b/backend/vendor/github.com/xdg-go/scram/channel_binding.go
@@ -0,0 +1,144 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "hash"
+)
+
+// ChannelBindingType represents the type of channel binding to use with
+// SCRAM-PLUS authentication variants. The type must match one of the
+// channel binding types defined in RFC 5056, RFC 5929, or RFC 9266.
+type ChannelBindingType string
+
+const (
+ // ChannelBindingNone indicates no channel binding is used.
+ ChannelBindingNone ChannelBindingType = ""
+
+ // ChannelBindingTLSUnique uses the TLS Finished message from the first
+ // TLS handshake (RFC 5929). This is considered insecure, but is included
+ // as required by RFC 5802.
+ ChannelBindingTLSUnique ChannelBindingType = "tls-unique"
+
+ // ChannelBindingTLSServerEndpoint uses a hash of the server's certificate
+ // (RFC 5929). This works with all TLS versions including TLS 1.3.
+ ChannelBindingTLSServerEndpoint ChannelBindingType = "tls-server-end-point"
+
+ // ChannelBindingTLSExporter uses TLS Exported Keying Material with the
+ // label "EXPORTER-Channel-Binding" (RFC 9266). This is the recommended
+ // channel binding type for TLS 1.3.
+ ChannelBindingTLSExporter ChannelBindingType = "tls-exporter"
+)
+
+// ChannelBinding holds the channel binding type and data for SCRAM-PLUS
+// authentication. Use constructors to create type-specific bindings.
+type ChannelBinding struct {
+ Type ChannelBindingType
+ Data []byte
+}
+
+// IsSupported returns true if the channel binding is configured with a
+// non-empty type and data.
+func (cb ChannelBinding) IsSupported() bool {
+ return cb.Type != ChannelBindingNone && len(cb.Data) > 0
+}
+
+// Matches returns true if this channel binding matches another channel
+// binding in both type and data.
+func (cb ChannelBinding) Matches(other ChannelBinding) bool {
+ if cb.Type != other.Type {
+ return false
+ }
+ return hmac.Equal(cb.Data, other.Data)
+}
+
+// NewTLSUniqueBinding creates a ChannelBinding for tls-unique channel binding.
+// Since Go's standard library doesn't expose the TLS Finished message,
+// applications must provide the data directly.
+//
+// Note: tls-unique is considered insecure and should generally be avoided.
+func NewTLSUniqueBinding(data []byte) ChannelBinding {
+ // Create a defensive copy to prevent caller from modifying the data
+ cbData := make([]byte, len(data))
+ copy(cbData, data)
+ return ChannelBinding{
+ Type: ChannelBindingTLSUnique,
+ Data: cbData,
+ }
+}
+
+// NewTLSServerEndpointBinding creates a ChannelBinding for tls-server-end-point
+// channel binding per RFC 5929. It extracts the server's certificate from
+// the TLS connection state and hashes it using the appropriate hash function
+// based on the certificate's signature algorithm.
+//
+// This works with all TLS versions including TLS 1.3.
+func NewTLSServerEndpointBinding(connState *tls.ConnectionState) (ChannelBinding, error) {
+ if connState == nil {
+ return ChannelBinding{}, errors.New("connection state is nil")
+ }
+
+ if len(connState.PeerCertificates) == 0 {
+ return ChannelBinding{}, errors.New("no peer certificates")
+ }
+
+ cert := connState.PeerCertificates[0]
+
+ // Determine hash algorithm per RFC 5929
+ var h hash.Hash
+ switch cert.SignatureAlgorithm {
+ case x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1,
+ x509.ECDSAWithSHA1:
+ h = sha256.New() // Use SHA-256 for MD5/SHA-1
+ case x509.SHA256WithRSA, x509.SHA256WithRSAPSS,
+ x509.ECDSAWithSHA256:
+ h = sha256.New()
+ case x509.SHA384WithRSA, x509.SHA384WithRSAPSS,
+ x509.ECDSAWithSHA384:
+ h = sha512.New384()
+ case x509.SHA512WithRSA, x509.SHA512WithRSAPSS,
+ x509.ECDSAWithSHA512:
+ h = sha512.New()
+ default:
+ return ChannelBinding{}, fmt.Errorf("unsupported signature algorithm: %v",
+ cert.SignatureAlgorithm)
+ }
+
+ h.Write(cert.Raw)
+ return ChannelBinding{
+ Type: ChannelBindingTLSServerEndpoint,
+ Data: h.Sum(nil),
+ }, nil
+}
+
+// NewTLSExporterBinding creates a ChannelBinding for tls-exporter channel binding
+// per RFC 9266. It uses TLS Exported Keying Material with the label
+// "EXPORTER-Channel-Binding" and an empty context.
+//
+// This is the recommended channel binding type for TLS 1.3+.
+func NewTLSExporterBinding(connState *tls.ConnectionState) (ChannelBinding, error) {
+ if connState == nil {
+ return ChannelBinding{}, errors.New("connection state is nil")
+ }
+
+ cbData, err := connState.ExportKeyingMaterial("EXPORTER-Channel-Binding", nil, 32)
+ if err != nil {
+ return ChannelBinding{}, fmt.Errorf("failed to export keying material: %w", err)
+ }
+
+ return ChannelBinding{
+ Type: ChannelBindingTLSExporter,
+ Data: cbData,
+ }, nil
+}
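
A sketch of how an application might pick between the two constructors above for an established connection; conn, the address, and the TLS config are placeholders:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/xdg-go/scram"
)

func bindingFor(conn *tls.Conn) (scram.ChannelBinding, error) {
	state := conn.ConnectionState()
	// Prefer tls-exporter (RFC 9266) on TLS 1.3; otherwise fall back to
	// tls-server-end-point (RFC 5929), which works with all TLS versions.
	if state.Version >= tls.VersionTLS13 {
		return scram.NewTLSExporterBinding(&state)
	}
	return scram.NewTLSServerEndpointBinding(&state)
}

func main() {
	conn, err := tls.Dial("tcp", "db.example.com:5432", &tls.Config{})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cb, err := bindingFor(conn)
	if err != nil {
		panic(err)
	}
	fmt.Println("channel binding type:", cb.Type)
}
```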
diff --git a/backend/vendor/github.com/xdg-go/scram/client.go b/backend/vendor/github.com/xdg-go/scram/client.go
index 5b53021..906fe6e 100644
--- a/backend/vendor/github.com/xdg-go/scram/client.go
+++ b/backend/vendor/github.com/xdg-go/scram/client.go
@@ -8,8 +8,6 @@ package scram
import (
"sync"
-
- "github.com/xdg-go/pbkdf2"
)
// Client implements the client side of SCRAM authentication. It holds
@@ -81,38 +79,109 @@ func (c *Client) NewConversation() *ClientConversation {
}
}
-func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys {
+// NewConversationAdvertisingChannelBinding constructs a client-side
+// authentication conversation that advertises channel binding support without
+// using it. This generates the "y" GS2 flag, indicating the client supports
+// channel binding but the server did not advertise a PLUS variant mechanism.
+//
+// This helps detect downgrade attacks where a MITM strips PLUS mechanism
+// advertisements from the server's mechanism list. If the server actually
+// advertised PLUS variants, it will reject the "y" flag as a downgrade attack.
+//
+// Use this when:
+// - Your application supports channel binding (has access to TLS connection state)
+// - SASL mechanism negotiation showed the server does NOT advertise PLUS variants
+// (e.g., server advertised "SCRAM-SHA-256" but not "SCRAM-SHA-256-PLUS")
+//
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (c *Client) NewConversationAdvertisingChannelBinding() *ClientConversation {
+ c.RLock()
+ defer c.RUnlock()
+ return &ClientConversation{
+ client: c,
+ nonceGen: c.nonceGen,
+ hashGen: c.hashGen,
+ minIters: c.minIters,
+ advertiseChannelBinding: true,
+ }
+}
+
+// NewConversationWithChannelBinding constructs a client-side authentication
+// conversation with channel binding for SCRAM-PLUS authentication. Channel
+// binding is connection-specific, so a new conversation should be created for
+// each connection being authenticated. Conversations cannot be reused, so this
+// must be called for each new authentication attempt.
+func (c *Client) NewConversationWithChannelBinding(cb ChannelBinding) *ClientConversation {
+ c.RLock()
+ defer c.RUnlock()
+ return &ClientConversation{
+ client: c,
+ nonceGen: c.nonceGen,
+ hashGen: c.hashGen,
+ minIters: c.minIters,
+ channelBinding: cb,
+ }
+}
+
+func (c *Client) getDerivedKeys(kf KeyFactors) (derivedKeys, error) {
dk, ok := c.getCache(kf)
if !ok {
- dk = c.computeKeys(kf)
+ var err error
+ dk, err = c.computeKeys(kf)
+ if err != nil {
+ return derivedKeys{}, err
+ }
c.setCache(kf, dk)
}
- return dk
+ return dk, nil
}
// GetStoredCredentials takes a salt and iteration count structure and
-// provides the values that must be stored by a server to authentication a
+// provides the values that must be stored by a server to authenticate a
// user. These values are what the Server credential lookup function must
// return for a given username.
+//
+// Deprecated: Use GetStoredCredentialsWithError for proper error handling.
+// This method panics if PBKDF2 key derivation fails, which should only
+// occur with invalid KeyFactors parameters.
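+//
+// Migration sketch:
+//
+//	creds, err := client.GetStoredCredentialsWithError(kf)
+//	if err != nil {
+//		// invalid KeyFactors, e.g. a bad iteration count or key length
+//	}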
func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials {
- dk := c.getDerivedKeys(kf)
+ creds, err := c.GetStoredCredentialsWithError(kf)
+ if err != nil {
+ panic("scram: GetStoredCredentials failed: " + err.Error())
+ }
+ return creds
+}
+
+// GetStoredCredentialsWithError takes a salt and iteration count structure and
+// provides the values that must be stored by a server to authenticate a
+// user. These values are what the Server credential lookup function must
+// return for a given username.
+//
+// Returns an error if PBKDF2 key derivation fails, which can occur with
+// invalid parameters in Go 1.24+ (e.g., invalid iteration counts or key lengths).
+func (c *Client) GetStoredCredentialsWithError(kf KeyFactors) (StoredCredentials, error) {
+ dk, err := c.getDerivedKeys(kf)
return StoredCredentials{
KeyFactors: kf,
StoredKey: dk.StoredKey,
ServerKey: dk.ServerKey,
- }
+ }, err
}
-func (c *Client) computeKeys(kf KeyFactors) derivedKeys {
+func (c *Client) computeKeys(kf KeyFactors) (derivedKeys, error) {
h := c.hashGen()
- saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen)
+ saltedPassword, err := pbkdf2Key(c.hashGen, c.password, []byte(kf.Salt), kf.Iters, h.Size())
+ if err != nil {
+ return derivedKeys{}, err
+ }
clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key"))
return derivedKeys{
ClientKey: clientKey,
StoredKey: computeHash(c.hashGen, clientKey),
ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")),
- }
+ }, nil
}
func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) {
@@ -126,5 +195,4 @@ func (c *Client) setCache(kf KeyFactors, dk derivedKeys) {
c.Lock()
defer c.Unlock()
c.cache[kf] = dk
- return
}
diff --git a/backend/vendor/github.com/xdg-go/scram/client_conv.go b/backend/vendor/github.com/xdg-go/scram/client_conv.go
index 8340568..4da6c24 100644
--- a/backend/vendor/github.com/xdg-go/scram/client_conv.go
+++ b/backend/vendor/github.com/xdg-go/scram/client_conv.go
@@ -27,16 +27,18 @@ const (
// conversation with a server. A new conversation must be created for
// each authentication attempt.
type ClientConversation struct {
- client *Client
- nonceGen NonceGeneratorFcn
- hashGen HashGeneratorFcn
- minIters int
- state clientState
- valid bool
- gs2 string
- nonce string
- c1b string
- serveSig []byte
+ client *Client
+ nonceGen NonceGeneratorFcn
+ hashGen HashGeneratorFcn
+ minIters int
+ state clientState
+ valid bool
+ gs2 string
+ nonce string
+ c1b string
+ serveSig []byte
+ channelBinding ChannelBinding
+ advertiseChannelBinding bool // if true, use "y" flag instead of "n" or "p"
}
// Step takes a string provided from a server (or just an empty string for the
@@ -99,10 +101,19 @@ func (cc *ClientConversation) finalMsg(s1 string) (string, error) {
return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters)
}
+ // Create channel binding data per RFC 5802:
+ // - For "p" flag: gs2-header + channel-binding-data
+ // - For "n" or "y" flags: gs2-header only (no channel-binding-data)
+ cbindData := []byte(cc.gs2)
+ if cc.channelBinding.IsSupported() {
+ // Only append channel binding data when actually using it (flag "p")
+ cbindData = append(cbindData, cc.channelBinding.Data...)
+ }
+
// Create client-final-message-without-proof
c2wop := fmt.Sprintf(
"c=%s,r=%s",
- base64.StdEncoding.EncodeToString([]byte(cc.gs2)),
+ base64.StdEncoding.EncodeToString(cbindData),
cc.nonce,
)
@@ -110,11 +121,17 @@ func (cc *ClientConversation) finalMsg(s1 string) (string, error) {
authMsg := cc.c1b + "," + s1 + "," + c2wop
// Get derived keys from client cache
- dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters})
+ dk, err := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters})
+ if err != nil {
+ return "", err
+ }
// Create proof as clientkey XOR clientsignature
clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg))
- clientProof := xorBytes(dk.ClientKey, clientSignature)
+ clientProof, err := xorBytes(dk.ClientKey, clientSignature)
+ if err != nil {
+ return "", err
+ }
proof := base64.StdEncoding.EncodeToString(clientProof)
// Cache ServerSignature for later validation
@@ -142,8 +159,23 @@ func (cc *ClientConversation) validateServer(s2 string) (string, error) {
}
func (cc *ClientConversation) gs2Header() string {
- if cc.client.authzID == "" {
- return "n,,"
+ var cbFlag string
+
+ if cc.channelBinding.IsSupported() {
+ // Client is using channel binding with specific type
+ cbFlag = fmt.Sprintf("p=%s", cc.channelBinding.Type)
+ } else if cc.advertiseChannelBinding {
+ // Client supports channel binding but server didn't advertise PLUS
+ cbFlag = "y"
+ } else {
+ // Client doesn't support channel binding
+ cbFlag = "n"
}
- return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID))
+
+ authzPart := ""
+ if cc.client.authzID != "" {
+ authzPart = "a=" + encodeName(cc.client.authzID)
+ }
+
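+	// Resulting headers look like "n,,", "y,,", "p=tls-exporter,," or,
+	// with an authorization identity, "n,a=admin,".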
+ return fmt.Sprintf("%s,%s,", cbFlag, authzPart)
}
diff --git a/backend/vendor/github.com/xdg-go/scram/common.go b/backend/vendor/github.com/xdg-go/scram/common.go
index cb705cb..fa0a81b 100644
--- a/backend/vendor/github.com/xdg-go/scram/common.go
+++ b/backend/vendor/github.com/xdg-go/scram/common.go
@@ -10,6 +10,7 @@ import (
"crypto/hmac"
"crypto/rand"
"encoding/base64"
+ "errors"
"strings"
)
@@ -58,10 +59,50 @@ type StoredCredentials struct {
// StoredCredentials.
type CredentialLookup func(string) (StoredCredentials, error)
+// Server error values as defined in RFC-5802 and RFC-7677. A server
+// conversation returns one of these as its outgoing "e=" message when an
+// authentication step fails.
+const (
+ // ErrInvalidEncoding indicates the client message had invalid encoding
+ ErrInvalidEncoding = "e=invalid-encoding"
+
+	// ErrExtensionsNotSupported indicates an unrecognized 'm' (extensions) attribute value
+ ErrExtensionsNotSupported = "e=extensions-not-supported"
+
+ // ErrInvalidProof indicates the authentication proof from the client was invalid
+ ErrInvalidProof = "e=invalid-proof"
+
+	// ErrChannelBindingsDontMatch indicates the channel binding data didn't match the expected value
+ ErrChannelBindingsDontMatch = "e=channel-bindings-dont-match"
+
+	// ErrServerDoesSupportChannelBinding indicates the server does support
+	// channel binding. It is returned when a downgrade attack is detected, or
+	// when the client does not support channel binding but the server
+	// requires it.
+	ErrServerDoesSupportChannelBinding = "e=server-does-support-channel-binding"
+
+	// ErrChannelBindingNotSupported indicates the server does not support channel binding
+ ErrChannelBindingNotSupported = "e=channel-binding-not-supported"
+
+ // ErrUnsupportedChannelBindingType indicates the requested channel binding type is not supported
+ ErrUnsupportedChannelBindingType = "e=unsupported-channel-binding-type"
+
+ // ErrUnknownUser indicates the specified user does not exist
+ ErrUnknownUser = "e=unknown-user"
+
+ // ErrInvalidUsernameEncoding indicates invalid username encoding (invalid UTF-8 or SASLprep failed)
+ ErrInvalidUsernameEncoding = "e=invalid-username-encoding"
+
+ // ErrNoResources indicates the server is out of resources
+ ErrNoResources = "e=no-resources"
+
+ // ErrOtherError is a catch-all for unspecified errors. The server may substitute
+ // the real reason with this error to prevent information disclosure.
+ ErrOtherError = "e=other-error"
+)
+
func defaultNonceGenerator() string {
raw := make([]byte, 24)
nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
- rand.Read(raw)
+ _, _ = rand.Read(raw)
base64.StdEncoding.Encode(nonce, raw)
return string(nonce)
}
@@ -70,11 +111,6 @@ func encodeName(s string) string {
return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1)
}
-func decodeName(s string) (string, error) {
- // TODO Check for = not followed by 2C or 3D
- return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil
-}
-
func computeHash(hg HashGeneratorFcn, b []byte) []byte {
h := hg()
h.Write(b)
@@ -87,11 +123,13 @@ func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte {
return mac.Sum(nil)
}
-func xorBytes(a, b []byte) []byte {
- // TODO check a & b are same length, or just xor to smallest
+func xorBytes(a, b []byte) ([]byte, error) {
+ if len(a) != len(b) {
+ return nil, errors.New("internal error: xorBytes arguments must have equal length")
+ }
xor := make([]byte, len(a))
for i := range a {
xor[i] = a[i] ^ b[i]
}
- return xor
+ return xor, nil
}
diff --git a/backend/vendor/github.com/xdg-go/scram/doc.go b/backend/vendor/github.com/xdg-go/scram/doc.go
index 82e8aee..3314cc1 100644
--- a/backend/vendor/github.com/xdg-go/scram/doc.go
+++ b/backend/vendor/github.com/xdg-go/scram/doc.go
@@ -5,22 +5,55 @@
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// Package scram provides client and server implementations of the Salted
-// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802
-// and RFC-7677.
+// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802,
+// RFC-7677, and RFC-9266.
//
-// Usage
+// # Usage
//
// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that
// are used to construct Client or Server objects.
//
-// clientSHA1, err := scram.SHA1.NewClient(username, password, authID)
-// clientSHA256, err := scram.SHA256.NewClient(username, password, authID)
-// clientSHA512, err := scram.SHA512.NewClient(username, password, authID)
+// clientSHA1, err := scram.SHA1.NewClient(username, password, authID)
+// clientSHA256, err := scram.SHA256.NewClient(username, password, authID)
+// clientSHA512, err := scram.SHA512.NewClient(username, password, authID)
//
-// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn)
-// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn)
-// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn)
+// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn)
+// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn)
+// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn)
//
// These objects are used to construct ClientConversation or
// ServerConversation objects that are used to carry out authentication.
+//
+// clientConv := client.NewConversation()
+// serverConv := server.NewConversation()
+//
+// # Channel Binding (SCRAM-PLUS)
+//
+// The scram package supports channel binding for SCRAM-PLUS authentication
+// variants as described in RFC-5802, RFC-5929, and RFC-9266. Channel binding
+// cryptographically binds the SCRAM authentication to an underlying TLS
+// connection, preventing man-in-the-middle attacks.
+//
+// To use channel binding, create conversations with channel binding data
+// obtained from the TLS connection:
+//
+// // Client example with tls-exporter (TLS 1.3+)
+// client, _ := scram.SHA256.NewClient(username, password, "")
+//	connState := tlsConn.ConnectionState()
+//	channelBinding, _ := scram.NewTLSExporterBinding(&connState)
+// clientConv := client.NewConversationWithChannelBinding(channelBinding)
+//
+// // Server conversation with the same channel binding
+// server, _ := scram.SHA256.NewServer(credentialLookupFcn)
+// serverConv := server.NewConversationWithChannelBinding(channelBinding)
+//
+// Helper functions are provided to create ChannelBinding values from TLS connections:
+// - NewTLSServerEndpointBinding: Uses server certificate hash (RFC 5929, all TLS versions)
+// - NewTLSExporterBinding: Uses exported keying material (RFC 9266, recommended for TLS 1.3+)
+//
+// Channel binding is configured on conversations rather than clients or servers
+// because binding data is connection-specific.
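+//
+// A client that supports channel binding but whose server offered no PLUS
+// variant can still advertise that support to detect downgrade attacks:
+//
+//	clientConv := client.NewConversationAdvertisingChannelBinding()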
+//
+// Channel binding type negotiation is not defined by the SCRAM protocol.
+// Applications must ensure both client and server agree on the same channel binding
+// type.
package scram
diff --git a/backend/vendor/github.com/xdg-go/scram/parse.go b/backend/vendor/github.com/xdg-go/scram/parse.go
index 722f604..3585c62 100644
--- a/backend/vendor/github.com/xdg-go/scram/parse.go
+++ b/backend/vendor/github.com/xdg-go/scram/parse.go
@@ -15,11 +15,13 @@ import (
)
type c1Msg struct {
- gs2Header string
- authzID string
- username string
- nonce string
- c1b string
+ gs2Header string
+ gs2BindFlag string // "n", "y", or "p"
+ channelBinding string // channel binding type name if gs2BindFlag is "p"
+ authzID string
+ username string
+ nonce string
+ c1b string
}
type c2Msg struct {
@@ -48,16 +50,25 @@ func parseField(s, k string) (string, error) {
return t, nil
}
-func parseGS2Flag(s string) (string, error) {
- if s[0] == 'p' {
- return "", fmt.Errorf("channel binding requested but not supported")
+// parseGS2Flag returns the GS2 bind flag ("n", "y", or "p"), the channel
+// binding type when the flag is "p", and an error.
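+// For example, "n" yields ("n", "", nil) and "p=tls-exporter" yields
+// ("p", "tls-exporter", nil).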
+func parseGS2Flag(s string) (string, string, error) {
+ if s == "n" || s == "y" {
+ return s, "", nil
}
- if s == "n" || s == "y" {
- return s, nil
+ // If not "n" or "y", must be "p=..." or error.
+ cbType, err := parseField(s, "p")
+ if err != nil {
+		return "", "", fmt.Errorf("error parsing '%s' for gs2 flag: %v", s, err)
}
- return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+ switch ChannelBindingType(cbType) {
+ case ChannelBindingTLSUnique, ChannelBindingTLSServerEndpoint, ChannelBindingTLSExporter:
+ // valid channel binding type
+ default:
+ return "", "", fmt.Errorf("invalid channel binding type: %s", cbType)
+ }
+ return "p", cbType, nil
}
func parseFieldBase64(s, k string) ([]byte, error) {
@@ -68,7 +79,7 @@ func parseFieldBase64(s, k string) ([]byte, error) {
dec, err := base64.StdEncoding.DecodeString(raw)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed decoding field '%s': %v", k, err)
}
return dec, nil
@@ -89,19 +100,18 @@ func parseFieldInt(s, k string) (int, error) {
}
func parseClientFirst(c1 string) (msg c1Msg, err error) {
-
fields := strings.Split(c1, ",")
if len(fields) < 4 {
err = errors.New("not enough fields in first server message")
return
}
- gs2flag, err := parseGS2Flag(fields[0])
+ msg.gs2BindFlag, msg.channelBinding, err = parseGS2Flag(fields[0])
if err != nil {
return
}
- // 'a' field is optional
+ // authzID content is optional, but the field must be present.
if len(fields[1]) > 0 {
msg.authzID, err = parseField(fields[1], "a")
if err != nil {
@@ -109,9 +119,6 @@ func parseClientFirst(c1 string) (msg c1Msg, err error) {
}
}
- // Recombine and save the gs2 header
- msg.gs2Header = gs2flag + "," + msg.authzID + ","
-
// Check for unsupported extensions field "m".
if strings.HasPrefix(fields[2], "m=") {
err = errors.New("SCRAM message extensions are not supported")
@@ -128,6 +135,10 @@ func parseClientFirst(c1 string) (msg c1Msg, err error) {
return
}
+ // Recombine the gs2Header: gs2-cbind-flag "," [ authzid ] ","
+ msg.gs2Header = fields[0] + "," + fields[1] + ","
+
+ // Recombine the client-first-message-bare: username "," nonce
msg.c1b = strings.Join(fields[2:], ",")
return
diff --git a/backend/vendor/github.com/xdg-go/scram/pbkdf2_go124.go b/backend/vendor/github.com/xdg-go/scram/pbkdf2_go124.go
new file mode 100644
index 0000000..6708ec4
--- /dev/null
+++ b/backend/vendor/github.com/xdg-go/scram/pbkdf2_go124.go
@@ -0,0 +1,18 @@
+// Copyright 2025 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build go1.24
+
+package scram
+
+import (
+ "crypto/pbkdf2"
+ "hash"
+)
+
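+// pbkdf2Key derives the salted password via the standard library's
+// crypto/pbkdf2 (added in Go 1.24), which reports invalid parameters
+// as errors.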
+func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLength int) ([]byte, error) {
+ return pbkdf2.Key(h, password, salt, iter, keyLength)
+}
diff --git a/backend/vendor/github.com/xdg-go/scram/pbkdf2_legacy.go b/backend/vendor/github.com/xdg-go/scram/pbkdf2_legacy.go
new file mode 100644
index 0000000..a81ab41
--- /dev/null
+++ b/backend/vendor/github.com/xdg-go/scram/pbkdf2_legacy.go
@@ -0,0 +1,19 @@
+// Copyright 2025 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !go1.24
+
+package scram
+
+import (
+ "hash"
+
+ "github.com/xdg-go/pbkdf2"
+)
+
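+// pbkdf2Key derives the salted password via github.com/xdg-go/pbkdf2 on Go
+// versions before 1.24; this implementation cannot fail, so the returned
+// error is always nil.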
+func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLength int) ([]byte, error) {
+ return pbkdf2.Key([]byte(password), salt, iter, keyLength, h), nil
+}
diff --git a/backend/vendor/github.com/xdg-go/scram/server.go b/backend/vendor/github.com/xdg-go/scram/server.go
index b119b36..e116623 100644
--- a/backend/vendor/github.com/xdg-go/scram/server.go
+++ b/backend/vendor/github.com/xdg-go/scram/server.go
@@ -48,3 +48,62 @@ func (s *Server) NewConversation() *ServerConversation {
credentialCB: s.credentialCB,
}
}
+
+// NewConversationWithChannelBinding constructs a server-side authentication
+// conversation with channel binding for SCRAM-PLUS authentication.
+//
+// This signals that the server advertised PLUS mechanism variants (e.g.,
+// SCRAM-SHA-256-PLUS) during SASL negotiation, but channel binding is NOT required.
+// Clients may authenticate using either the base mechanism (e.g., SCRAM-SHA-256)
+// or the PLUS variant (e.g., SCRAM-SHA-256-PLUS).
+//
+// The server will:
+// - Accept clients without channel binding support (using "n" flag)
+// - Accept clients with matching channel binding (using "p" flag)
+// - Reject downgrade attacks (clients using "y" flag when PLUS was advertised)
+//
+// Channel binding is connection-specific, so a new conversation should be
+// created for each connection being authenticated.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
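+//
+// Illustrative sketch (tlsConn is assumed to be the accepted *tls.Conn):
+//
+//	connState := tlsConn.ConnectionState()
+//	cb, err := scram.NewTLSExporterBinding(&connState)
+//	if err != nil {
+//		// handle export failure
+//	}
+//	conv := server.NewConversationWithChannelBinding(cb)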
+func (s *Server) NewConversationWithChannelBinding(cb ChannelBinding) *ServerConversation {
+ s.RLock()
+ defer s.RUnlock()
+ return &ServerConversation{
+ nonceGen: s.nonceGen,
+ hashGen: s.hashGen,
+ credentialCB: s.credentialCB,
+ channelBinding: cb,
+ }
+}
+
+// NewConversationWithChannelBindingRequired constructs a server-side authentication
+// conversation with mandatory channel binding for SCRAM-PLUS authentication.
+//
+// This signals that the server advertised ONLY SCRAM-PLUS mechanism variants
+// (e.g., only SCRAM-SHA-256-PLUS, not the base SCRAM-SHA-256) during SASL negotiation.
+// Channel binding is required for all authentication attempts.
+//
+// The server will:
+// - Accept only clients with matching channel binding (using "p" flag)
+// - Reject clients without channel binding support (using "n" flag)
+// - Reject downgrade attacks (clients using "y" flag when PLUS was advertised)
+//
+// This is intended for high-security deployments that advertise only SCRAM-PLUS
+// variants and want to enforce channel binding as mandatory.
+//
+// Channel binding is connection-specific, so a new conversation should be
+// created for each connection being authenticated.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (s *Server) NewConversationWithChannelBindingRequired(cb ChannelBinding) *ServerConversation {
+ s.RLock()
+ defer s.RUnlock()
+ return &ServerConversation{
+ nonceGen: s.nonceGen,
+ hashGen: s.hashGen,
+ credentialCB: s.credentialCB,
+ channelBinding: cb,
+ requireChannelBinding: true,
+ }
+}
diff --git a/backend/vendor/github.com/xdg-go/scram/server_conv.go b/backend/vendor/github.com/xdg-go/scram/server_conv.go
index 9c8838c..2874888 100644
--- a/backend/vendor/github.com/xdg-go/scram/server_conv.go
+++ b/backend/vendor/github.com/xdg-go/scram/server_conv.go
@@ -25,18 +25,22 @@ const (
// conversation with a client. A new conversation must be created for
// each authentication attempt.
type ServerConversation struct {
- nonceGen NonceGeneratorFcn
- hashGen HashGeneratorFcn
- credentialCB CredentialLookup
- state serverState
- credential StoredCredentials
- valid bool
- gs2Header string
- username string
- authzID string
- nonce string
- c1b string
- s1 string
+ nonceGen NonceGeneratorFcn
+ hashGen HashGeneratorFcn
+ credentialCB CredentialLookup
+ state serverState
+ credential StoredCredentials
+ valid bool
+ gs2Header string
+ username string
+ authzID string
+ nonce string
+ c1b string
+ s1 string
+ channelBinding ChannelBinding
+ requireChannelBinding bool
+ clientCBType string
+ clientCBFlag string
}
// Step takes a string provided from a client and attempts to move the
@@ -81,6 +85,65 @@ func (sc *ServerConversation) AuthzID() string {
return sc.authzID
}
+// validateChannelBindingFlag validates the client's channel binding flag against
+// server configuration. The validation logic follows RFC 5802 section 6, but
+// extends those semantics to cover the case of required channel binding.
+//
+// Client flag validation:
+// - "n": Client doesn't support channel binding
+// - "y": Client supports channel binding but server didn't advertise PLUS
+// - "p": Client requires channel binding with a specific type
+//
+// Returns the server error string to send to the client (empty if validation
+// passes) and an error describing the failure, if any.
+func (sc *ServerConversation) validateChannelBindingFlag() (string, error) {
+ advertised := sc.channelBinding.IsSupported()
+
+ switch sc.clientCBFlag {
+ case "n":
+ // Client doesn't support channel binding
+ if sc.requireChannelBinding {
+ // Policy violation: server requires channel binding
+ // Use ErrServerDoesSupportChannelBinding (defined for downgrade attacks)
+ // as the best available match to signal that server requires channel binding
+ return ErrServerDoesSupportChannelBinding,
+ errors.New("server requires channel binding but client doesn't support it")
+ }
+ // OK: server either doesn't advertise PLUS or advertises it optionally
+ return "", nil
+
+ case "y":
+ // Client supports channel binding but thinks server doesn't advertise PLUS
+ if advertised {
+ // Downgrade attack: we advertised PLUS but client didn't see it
+ return ErrServerDoesSupportChannelBinding,
+ errors.New("downgrade attack detected: client used 'y' but server advertised PLUS")
+ }
+ // OK: we didn't advertise PLUS, client correctly detected this
+ return "", nil
+
+ case "p":
+ // Client requires channel binding with specific type
+ if !advertised {
+ // Server doesn't support channel binding
+ return ErrChannelBindingNotSupported,
+ errors.New("client requires channel binding but server doesn't support it")
+ }
+ if ChannelBindingType(sc.clientCBType) != sc.channelBinding.Type {
+ // Server supports channel binding but not the requested type
+ return ErrUnsupportedChannelBindingType,
+ fmt.Errorf("client requested %s but server only supports %s",
+ sc.clientCBType, sc.channelBinding.Type)
+ }
+ // OK: channel binding type matches
+ return "", nil
+
+ default:
+ // Invalid flag (should have been caught by parser)
+ return ErrOtherError,
+ fmt.Errorf("invalid channel binding flag: %s", sc.clientCBFlag)
+ }
+}
+
func (sc *ServerConversation) firstMsg(c1 string) (string, error) {
msg, err := parseClientFirst(c1)
if err != nil {
@@ -89,13 +152,21 @@ func (sc *ServerConversation) firstMsg(c1 string) (string, error) {
}
sc.gs2Header = msg.gs2Header
+ sc.clientCBFlag = msg.gs2BindFlag
+ sc.clientCBType = msg.channelBinding
sc.username = msg.username
sc.authzID = msg.authzID
+ // Validate channel binding flag against server configuration
+ if serverErr, err := sc.validateChannelBindingFlag(); err != nil {
+ sc.state = serverDone
+ return serverErr, err
+ }
+
sc.credential, err = sc.credentialCB(msg.username)
if err != nil {
sc.state = serverDone
- return "e=unknown-user", err
+ return ErrUnknownUser, err
}
sc.nonce = msg.nonce + sc.nonceGen()
@@ -117,17 +188,25 @@ func (sc *ServerConversation) finalMsg(c2 string) (string, error) {
return "", err
}
- // Check channel binding matches what we expect; in this case, we expect
- // just the gs2 header we received as we don't support channel binding
- // with a data payload. If we add binding, we need to independently
- // compute the header to match here.
- if string(msg.cbind) != sc.gs2Header {
- return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header)
+ // Check channel binding data matches what we expect
+ var expectedCBind []byte
+ if sc.clientCBFlag == "p" {
+ // Client used channel binding - expect gs2 header + channel binding data
+ expectedCBind = append([]byte(sc.gs2Header), sc.channelBinding.Data...)
+ } else {
+ // Client didn't use channel binding - just expect gs2 header
+ expectedCBind = []byte(sc.gs2Header)
+ }
+
+ if !hmac.Equal(msg.cbind, expectedCBind) {
+ return ErrChannelBindingsDontMatch,
+ fmt.Errorf("channel binding mismatch: expected %x, got %x",
+ expectedCBind, msg.cbind)
}
// Check nonce received matches what we sent
if msg.nonce != sc.nonce {
- return "e=other-error", errors.New("nonce received did not match nonce sent")
+ return ErrOtherError, errors.New("nonce received did not match nonce sent")
}
// Create auth message
@@ -135,12 +214,15 @@ func (sc *ServerConversation) finalMsg(c2 string) (string, error) {
// Retrieve ClientKey from proof and verify it
clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg))
- clientKey := xorBytes([]byte(msg.proof), clientSignature)
+ clientKey, err := xorBytes([]byte(msg.proof), clientSignature)
+ if err != nil {
+ return ErrOtherError, err
+ }
storedKey := computeHash(sc.hashGen, clientKey)
// Compare with constant-time function
if !hmac.Equal(storedKey, sc.credential.StoredKey) {
- return "e=invalid-proof", errors.New("challenge proof invalid")
+ return ErrInvalidProof, errors.New("challenge proof invalid")
}
sc.valid = true
diff --git a/backend/vendor/modules.txt b/backend/vendor/modules.txt
index bd35114..0b8aed2 100644
--- a/backend/vendor/modules.txt
+++ b/backend/vendor/modules.txt
@@ -18,7 +18,7 @@ github.com/go-pkgz/rest/realip
# github.com/go-pkgz/routegroup v1.6.0
## explicit; go 1.23
github.com/go-pkgz/routegroup
-# github.com/golang/snappy v0.0.4
+# github.com/golang/snappy v1.0.0
## explicit
github.com/golang/snappy
# github.com/jessevdk/go-flags v1.6.1
@@ -27,12 +27,13 @@ github.com/jessevdk/go-flags
# github.com/kennygrant/sanitize v1.2.4
## explicit
github.com/kennygrant/sanitize
-# github.com/klauspost/compress v1.17.8
-## explicit; go 1.20
+# github.com/klauspost/compress v1.18.2
+## explicit; go 1.23
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@@ -53,8 +54,8 @@ github.com/stretchr/testify/require
# github.com/xdg-go/pbkdf2 v1.0.0
## explicit; go 1.9
github.com/xdg-go/pbkdf2
-# github.com/xdg-go/scram v1.1.2
-## explicit; go 1.11
+# github.com/xdg-go/scram v1.2.0
+## explicit; go 1.18
github.com/xdg-go/scram
# github.com/xdg-go/stringprep v1.0.4
## explicit; go 1.11
@@ -111,7 +112,7 @@ go.mongodb.org/mongo-driver/x/mongo/driver/operation
go.mongodb.org/mongo-driver/x/mongo/driver/session
go.mongodb.org/mongo-driver/x/mongo/driver/topology
go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage
-# golang.org/x/crypto v0.44.0
+# golang.org/x/crypto v0.45.0
## explicit; go 1.24.0
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt