diff --git a/go.mod b/go.mod
index 78853690fd0..a3492f2aa2f 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
github.com/hashicorp/golang-lru/arc/v2 v2.0.7
- github.com/klauspost/compress v1.18.1
+ github.com/klauspost/compress v1.18.4
github.com/mitchellh/mapstructure v1.5.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2
diff --git a/go.sum b/go.sum
index 6c4a1b13348..7ab26f02569 100644
--- a/go.sum
+++ b/go.sum
@@ -152,8 +152,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 244ee19c4bf..5125c1f267e 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -7,7 +7,7 @@ This package provides various compression algorithms.
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
-* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
@@ -26,6 +26,22 @@ This package will support the current Go version and 2 versions back.
Use the links above for more information on each.
# changelog
+* Jan 16th, 2026 - [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3)
+ * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102).
+
+* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2)
+ * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115
+ * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106
+
+* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED
+ * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
+ * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
+ * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
+ * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
+ * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
+ * flate: Faster load+store https://github.com/klauspost/compress/pull/1104
+ * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
+ * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
@@ -36,6 +52,9 @@ Use the links above for more information on each.
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+ See changes to v1.17.x
+
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
@@ -102,7 +121,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
-
+
+
See changes to v1.16.x
@@ -589,7 +609,7 @@ While the release has been extensively tested, it is recommended to testing when
# deflate usage
-The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Simply replace the import path to use them:
Typical speed is about 2x of the standard library packages.
@@ -600,17 +620,15 @@ Typical speed is about 2x of the standard library packages.
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
-* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip, which supports multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
-You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
-
-The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
Currently there is only minor speedup on decompression (mostly CRC32 calculation).
Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
-the stateless compress described below.
+the stateless compression described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
@@ -669,3 +687,7 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.
+
+
+
+
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 30df5513d56..c7e500f02a9 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -39,9 +39,6 @@ type Decoder struct {
frame *frameDec
- // Custom dictionaries.
- dicts map[uint32]*dict
-
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
}
@@ -101,12 +98,10 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
d.current.err = ErrDecoderNilInput
}
- // Transfer option dicts.
- d.dicts = make(map[uint32]*dict, len(d.o.dicts))
- for _, dc := range d.o.dicts {
- d.dicts[dc.id] = dc
+ // Initialize dict map if needed.
+ if d.o.dicts == nil {
+ d.o.dicts = make(map[uint32]*dict)
}
- d.o.dicts = nil
// Create decoders
d.decoders = make(chan *blockDec, d.o.concurrent)
@@ -238,6 +233,21 @@ func (d *Decoder) Reset(r io.Reader) error {
return nil
}
+// ResetWithOptions will reset the decoder and apply the given options
+// for the next stream or DecodeAll operation.
+// Options are applied on top of the existing options.
+// Some options cannot be changed on reset and will return an error.
+func (d *Decoder) ResetWithOptions(r io.Reader, opts ...DOption) error {
+ d.o.resetOpt = true
+ defer func() { d.o.resetOpt = false }()
+ for _, o := range opts {
+ if err := o(&d.o); err != nil {
+ return err
+ }
+ }
+ return d.Reset(r)
+}
+
// drainOutput will drain the output until errEndOfStream is sent.
func (d *Decoder) drainOutput() {
if d.current.cancel != nil {
@@ -930,7 +940,7 @@ decodeStream:
}
func (d *Decoder) setDict(frame *frameDec) (err error) {
- dict, ok := d.dicts[frame.DictionaryID]
+ dict, ok := d.o.dicts[frame.DictionaryID]
if ok {
if debugDecoder {
println("setting dict", frame.DictionaryID)
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 774c5f00fe4..537627a0789 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -20,10 +20,11 @@ type decoderOptions struct {
concurrent int
maxDecodedSize uint64
maxWindowSize uint64
- dicts []*dict
+ dicts map[uint32]*dict
ignoreChecksum bool
limitToCap bool
decodeBufsBelow int
+ resetOpt bool
}
func (o *decoderOptions) setDefault() {
@@ -42,8 +43,15 @@ func (o *decoderOptions) setDefault() {
// WithDecoderLowmem will set whether to use a lower amount of memory,
// but possibly have to allocate more while running.
+// Cannot be changed with ResetWithOptions.
func WithDecoderLowmem(b bool) DOption {
- return func(o *decoderOptions) error { o.lowMem = b; return nil }
+ return func(o *decoderOptions) error {
+ if o.resetOpt && b != o.lowMem {
+ return errors.New("WithDecoderLowmem cannot be changed on Reset")
+ }
+ o.lowMem = b
+ return nil
+ }
}
// WithDecoderConcurrency sets the number of created decoders.
@@ -53,18 +61,23 @@ func WithDecoderLowmem(b bool) DOption {
// inflight blocks.
// When decoding streams and setting maximum to 1,
// no async decoding will be done.
+// The value supplied must be at least 0.
// When a value of 0 is provided GOMAXPROCS will be used.
// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
+// Cannot be changed with ResetWithOptions.
func WithDecoderConcurrency(n int) DOption {
return func(o *decoderOptions) error {
if n < 0 {
- return errors.New("concurrency must be at least 1")
+ return errors.New("concurrency must be at least 0")
}
+ newVal := n
if n == 0 {
- o.concurrent = runtime.GOMAXPROCS(0)
- } else {
- o.concurrent = n
+ newVal = runtime.GOMAXPROCS(0)
}
+ if o.resetOpt && newVal != o.concurrent {
+ return errors.New("WithDecoderConcurrency cannot be changed on Reset")
+ }
+ o.concurrent = newVal
return nil
}
}
@@ -73,6 +86,7 @@ func WithDecoderConcurrency(n int) DOption {
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum is 1 << 63 bytes. Default is 64GiB.
+// Can be changed with ResetWithOptions.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@@ -92,16 +106,20 @@ func WithDecoderMaxMemory(n uint64) DOption {
// "zstd --train" from the Zstandard reference implementation.
//
// If several dictionaries with the same ID are provided, the last one will be used.
+// Can be changed with ResetWithOptions.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption {
return func(o *decoderOptions) error {
+ if o.dicts == nil {
+ o.dicts = make(map[uint32]*dict)
+ }
for _, b := range dicts {
d, err := loadDict(b)
if err != nil {
return err
}
- o.dicts = append(o.dicts, d)
+ o.dicts[d.id] = d
}
return nil
}
@@ -109,12 +127,16 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
+// Can be changed with ResetWithOptions.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
- o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+ if o.dicts == nil {
+ o.dicts = make(map[uint32]*dict)
+ }
+ o.dicts[id] = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
return nil
}
}
@@ -124,6 +146,7 @@ func WithDecoderDictRaw(id uint32, content []byte) DOption {
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
// If WithDecoderMaxMemory is set to a lower value, that will be used.
// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
+// Can be changed with ResetWithOptions.
func WithDecoderMaxWindow(size uint64) DOption {
return func(o *decoderOptions) error {
if size < MinWindowSize {
@@ -141,6 +164,7 @@ func WithDecoderMaxWindow(size uint64) DOption {
// or any size set in WithDecoderMaxMemory.
// This can be used to limit decoding to a specific maximum output size.
// Disabled by default.
+// Can be changed with ResetWithOptions.
func WithDecodeAllCapLimit(b bool) DOption {
return func(o *decoderOptions) error {
o.limitToCap = b
@@ -153,17 +177,37 @@ func WithDecodeAllCapLimit(b bool) DOption {
// This typically uses less allocations but will have the full decompressed object in memory.
// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
// Default is 128KiB.
+// Cannot be changed with ResetWithOptions.
func WithDecodeBuffersBelow(size int) DOption {
return func(o *decoderOptions) error {
+ if o.resetOpt && size != o.decodeBufsBelow {
+ return errors.New("WithDecodeBuffersBelow cannot be changed on Reset")
+ }
o.decodeBufsBelow = size
return nil
}
}
// IgnoreChecksum allows to forcibly ignore checksum checking.
+// Can be changed with ResetWithOptions.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}
+
+// WithDecoderDictDelete removes dictionaries by ID.
+// If no ids are passed, all dictionaries are deleted.
+// Should be used with ResetWithOptions.
+func WithDecoderDictDelete(ids ...uint32) DOption {
+ return func(o *decoderOptions) error {
+ if len(ids) == 0 {
+ clear(o.dicts)
+ }
+ for _, id := range ids {
+ delete(o.dicts, id)
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 8f8223cd3a6..19e730acc26 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -131,6 +131,22 @@ func (e *Encoder) Reset(w io.Writer) {
s.frameContentSize = 0
}
+// ResetWithOptions will re-initialize the writer and apply the given options
+// as a new, independent stream.
+// Options are applied on top of the existing options.
+// Some options cannot be changed on reset and will return an error.
+func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error {
+ e.o.resetOpt = true
+ defer func() { e.o.resetOpt = false }()
+ for _, o := range opts {
+ if err := o(&e.o); err != nil {
+ return err
+ }
+ }
+ e.Reset(w)
+ return nil
+}
+
// ResetContentSize will reset and set a content size for the next stream.
// If the bytes written does not match the size given an error will be returned
// when calling Close().
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 20671dcb91d..8e0f5cac71b 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -14,6 +14,7 @@ type EOption func(*encoderOptions) error
// options retains accumulated state of multiple options.
type encoderOptions struct {
+ resetOpt bool
concurrent int
level EncoderLevel
single *bool
@@ -71,19 +72,28 @@ func (o encoderOptions) encoder() encoder {
// WithEncoderCRC will add CRC value to output.
// Output will be 4 bytes larger.
+// Can be changed with ResetWithOptions.
func WithEncoderCRC(b bool) EOption {
return func(o *encoderOptions) error { o.crc = b; return nil }
}
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
-// The value supplied must be at least 1.
+// The value supplied must be at least 0.
+// When a value of 0 is provided GOMAXPROCS will be used.
// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
+// Cannot be changed with ResetWithOptions.
func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error {
- if n <= 0 {
- return fmt.Errorf("concurrency must be at least 1")
+ if n < 0 {
+ return errors.New("concurrency must at least 0")
+ }
+ if n == 0 {
+ n = runtime.GOMAXPROCS(0)
+ }
+ if o.resetOpt && n != o.concurrent {
+ return errors.New("WithEncoderConcurrency cannot be changed on Reset")
}
o.concurrent = n
return nil
@@ -95,6 +105,7 @@ func WithEncoderConcurrency(n int) EOption {
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
// The default value is determined by the compression level and max 8MB.
+// Cannot be changed with ResetWithOptions.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
@@ -105,6 +116,9 @@ func WithWindowSize(n int) EOption {
case (n & (n - 1)) != 0:
return errors.New("window size must be a power of 2")
}
+ if o.resetOpt && n != o.windowSize {
+ return errors.New("WithWindowSize cannot be changed on Reset")
+ }
o.windowSize = n
o.customWindow = true
@@ -122,6 +136,7 @@ func WithWindowSize(n int) EOption {
// n must be > 0 and <= 1GB, 1<<30 bytes.
// The padded area will be filled with data from crypto/rand.Reader.
// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this.
+// Can be changed with ResetWithOptions.
func WithEncoderPadding(n int) EOption {
return func(o *encoderOptions) error {
if n <= 0 {
@@ -215,12 +230,16 @@ func (e EncoderLevel) String() string {
}
// WithEncoderLevel specifies a predefined compression level.
+// Cannot be changed with ResetWithOptions.
func WithEncoderLevel(l EncoderLevel) EOption {
return func(o *encoderOptions) error {
switch {
case l <= speedNotSet || l >= speedLast:
return fmt.Errorf("unknown encoder level")
}
+ if o.resetOpt && l != o.level {
+ return errors.New("WithEncoderLevel cannot be changed on Reset")
+ }
o.level = l
if !o.customWindow {
switch o.level {
@@ -248,6 +267,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
// WithZeroFrames will encode 0 length input as full frames.
// This can be needed for compatibility with zstandard usage,
// but is not needed for this package.
+// Can be changed with ResetWithOptions.
func WithZeroFrames(b bool) EOption {
return func(o *encoderOptions) error {
o.fullZero = b
@@ -259,6 +279,7 @@ func WithZeroFrames(b bool) EOption {
// Disabling this will skip incompressible data faster, but in cases with no matches but
// skewed character distribution compression is lost.
// Default value depends on the compression level selected.
+// Can be changed with ResetWithOptions.
func WithAllLitEntropyCompression(b bool) EOption {
return func(o *encoderOptions) error {
o.customALEntropy = true
@@ -270,6 +291,7 @@ func WithAllLitEntropyCompression(b bool) EOption {
// WithNoEntropyCompression will always skip entropy compression of literals.
// This can be useful if content has matches, but unlikely to benefit from entropy
// compression. Usually the slight speed improvement is not worth enabling this.
+// Can be changed with ResetWithOptions.
func WithNoEntropyCompression(b bool) EOption {
return func(o *encoderOptions) error {
o.noEntropy = b
@@ -287,6 +309,7 @@ func WithNoEntropyCompression(b bool) EOption {
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
+// Can be changed with ResetWithOptions.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
o.single = &b
@@ -298,8 +321,12 @@ func WithSingleSegment(b bool) EOption {
// slower encoding speed.
// This will not change the window size which is the primary function for reducing
// memory usage. See WithWindowSize.
+// Cannot be changed with ResetWithOptions.
func WithLowerEncoderMem(b bool) EOption {
return func(o *encoderOptions) error {
+ if o.resetOpt && b != o.lowMem {
+ return errors.New("WithLowerEncoderMem cannot be changed on Reset")
+ }
o.lowMem = b
return nil
}
@@ -311,6 +338,7 @@ func WithLowerEncoderMem(b bool) EOption {
// "zstd --train" from the Zstandard reference implementation.
//
// The encoder *may* choose to use no dictionary instead for certain payloads.
+// Can be changed with ResetWithOptions.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
@@ -328,6 +356,7 @@ func WithEncoderDict(dict []byte) EOption {
//
// The slice content may contain arbitrary data. It will be used as an initial
// history.
+// Can be changed with ResetWithOptions.
func WithEncoderDictRaw(id uint32, content []byte) EOption {
return func(o *encoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
@@ -337,3 +366,12 @@ func WithEncoderDictRaw(id uint32, content []byte) EOption {
return nil
}
}
+
+// WithEncoderDictDelete clears the dictionary, so no dictionary will be used.
+// Should be used with ResetWithOptions.
+func WithEncoderDictDelete() EOption {
+ return func(o *encoderOptions) error {
+ o.dict = nil
+ return nil
+ }
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index aeb8848ef92..1360abe0570 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -361,7 +361,7 @@ github.com/inconshreveable/mousetrap
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.18.1
+# github.com/klauspost/compress v1.18.4
## explicit; go 1.23
github.com/klauspost/compress
github.com/klauspost/compress/fse