vendor: github.com/klauspost/compress v1.17.4 (#4181)

Commit 641306d946, authored by Milos Gajdos on 2023-12-01 10:51:07 +00:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23).
38 changed files with 754 additions and 307 deletions

go.mod (2 changes)

@@ -19,7 +19,7 @@ require (
 	github.com/gorilla/handlers v1.5.1
 	github.com/gorilla/mux v1.8.0
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.5
-	github.com/klauspost/compress v1.16.5
+	github.com/klauspost/compress v1.17.4
 	github.com/mitchellh/mapstructure v1.1.2
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.2

go.sum (4 changes)

@@ -223,8 +223,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
-github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=


@@ -3,7 +3,7 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@v0.9.3
+    - go install mvdan.cc/garble@v0.10.1

 builds:
   -
@@ -92,16 +92,7 @@ builds:
 archives:
   -
     id: s2-binaries
-    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     format_overrides:
       - goos: windows
         format: zip
@@ -125,7 +116,7 @@ changelog:
 nfpms:
   -
-    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     vendor: Klaus Post
     homepage: https://github.com/klauspost/compress
     maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +125,3 @@ nfpms:
     formats:
       - deb
       - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64


@@ -16,6 +16,36 @@ This package provides various compression algorithms.

 # changelog

+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+  * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+  * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+  * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+  * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+
+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
+  * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
+  * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
+  * flate: Add limited window compression https://github.com/klauspost/compress/pull/843
+  * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
+  * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
+  * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+  * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+  * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
+  * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
+  * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
+  * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
+  * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
+
+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
+  * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
+  * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
+
 * Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
   * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
   * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792

@@ -40,6 +70,9 @@ This package provides various compression algorithms.
   * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
   * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746

+<details>
+	<summary>See changes to v1.15.x</summary>
+
 * Jan 21st, 2023 (v1.15.15)
   * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
   * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

@@ -166,6 +199,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
 While the release has been extensively tested, it is recommended to testing when upgrading.

+</details>
+
 <details>
 	<summary>See changes to v1.14.x</summary>

@@ -626,6 +661,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
 * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
 * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
 * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.

 # license

vendor/github.com/klauspost/compress/SECURITY.md (new file, generated/vendored, 25 additions)

@@ -0,0 +1,25 @@
# Security Policy
## Supported Versions
Security updates are applied only to the latest release.
## Vulnerability Definition
A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.
Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
## Reporting a Vulnerability
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.


@@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {

 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }

 // reset and continue writing by appending to out.


@@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
 	c2.flush(s.actualTableLog)
 	c1.flush(s.actualTableLog)

-	return s.bw.close()
+	s.bw.close()
+	return nil
 }

 // writeCount will write the normalized histogram count to header.
@@ -211,7 +212,7 @@ func (s *Scratch) writeCount() error {
 		previous0 bool
 		charnum   uint16

-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3

 		// Write Table Size
 		bitStream = uint32(tableLog - minTablelog)


@@ -13,14 +13,6 @@ type bitWriter struct {
 	out []byte
 }

-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -102,10 +94,9 @@ func (b *bitWriter) flushAlign() {

 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }


@ -1,44 +0,0 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package huff0
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
b []byte
off int
}
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
b.b = in
b.off = 0
}
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
v2 := int32(b.b[b.off+2])
v1 := int32(b.b[b.off+1])
v0 := int32(b.b[b.off])
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
// Uint32 returns a little endian uint32 starting at current offset.
func (b byteReader) Uint32() uint32 {
v3 := uint32(b.b[b.off+3])
v2 := uint32(b.b[b.off+2])
v1 := uint32(b.b[b.off+1])
v0 := uint32(b.b[b.off])
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off
}


@@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
 }

 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }

-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
 	var bw = bitWriter{out: dst}

 	// N is length divisible by 4.
@@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
 			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }

 var sixZeros [6]byte
@@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
 		}
 		src = src[len(toDo):]

-		var err error
 		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
 		if len(s.Out)-idx > math.MaxUint16 {
 			// We cannot store the size in the jump table
 			return nil, ErrIncompressible
@@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 	segmentSize := (len(src) + 3) / 4

 	var wg sync.WaitGroup
-	var errs [4]error
 	wg.Add(4)
 	for i := 0; i < 4; i++ {
 		toDo := src
@@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {

 		// Separate goroutine for each block.
 		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
 			wg.Done()
 		}(i)
 	}
 	wg.Wait()
 	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
 		o := s.tmpOut[i]
 		if len(o) > math.MaxUint16 {
 			// We cannot store the size in the jump table
@@ -358,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	reuse = true
+	_ = s.count // Assert that s != nil to speed up the following loop.
 	for _, v := range in {
 		s.count[v]++
 	}
@@ -423,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool {

 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSrc := highBit32(uint32(s.srcLen)) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -435,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 {
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc


@@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 	switch d.actualTableLog {
 	case 8:
-		const shift = 8 - 8
+		const shift = 0
 		for br.off >= 4 {
 			br.fillFast()
 			v := dt[uint8(br.value>>(56+shift))]


@@ -88,7 +88,7 @@ type Scratch struct {
 	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
 	MaxDecodedSize int

-	br byteReader
+	srcLen int

 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.fse == nil {
 		s.fse = &fse.Scratch{}
 	}
-	s.br.init(in)
+	s.srcLen = len(in)

 	return s, nil
 }


@@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int {
 	return i + 2
 }

-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}
-
 func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }


@@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68

 ## Decompressor

-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.

 This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
 kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
@@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd"

 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
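For reference, the corrected README snippet fits into a small self-contained program along the lines of this sketch; the `decompress` helper and the round-trip in `main` are illustrative additions, not part of the upstream documentation:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A shared decoder that caches decompressors; with a nil Reader it is only
// used for DecodeAll. WithDecoderConcurrency(0) means "use GOMAXPROCS".
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

// decompress expands a zstd-compressed buffer into a newly allocated slice.
func decompress(src []byte) ([]byte, error) {
	return decoder.DecodeAll(src, nil)
}

func main() {
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	_ = enc.Close()

	out, err := decompress(compressed)
	fmt.Println(string(out), err)
}
```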


@@ -17,7 +17,6 @@ import (
 // for aligning the input.
 type bitReader struct {
 	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
 	bitsRead uint8
 }
@@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
 		return errors.New("corrupt stream: too short")
 	}
 	b.in = in
-	b.off = uint(len(in))
 	// The highest bit of the last byte indicates where to start
 	v := in[len(in)-1]
 	if v == 0 {
@@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
 	if b.bitsRead < 32 {
 		return
 	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
 	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 	b.value = (b.value << 32) | uint64(low)
 	b.bitsRead -= 32
-	b.off -= 4
 }

 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
 	b.bitsRead = 0
-	b.off -= 8
 }

 // fill() will make sure at least 32 bits are available.
@@ -91,25 +87,25 @@ func (b *bitReader) fill() {
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
 		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 		b.value = (b.value << 32) | uint64(low)
 		b.bitsRead -= 32
-		b.off -= 4
 		return
 	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
 	}
 }

 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return len(b.in) == 0 && b.bitsRead >= 64
 }

 // overread returns true if more bits have been requested than is on the stream.
@@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {

 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
 }

 // close the bitstream and returns an error if out-of-buffer reads occurred.


@@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {

 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }

 // reset and continue writing by appending to out.


@@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 			}
 			seq.fse.setRLE(symb)
 			if debugDecoder {
-				printf("RLE set to %+v, code: %v", symb, v)
+				printf("RLE set to 0x%x, code: %v", symb, v)
 			}
 		case compModeFSE:
 			println("Reading table for", tableIndex(i))


@@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
 	if len(lits) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}

 	switch err {
 	case huff0.ErrIncompressible:
 		if debugEncoder {
@@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 		err = huff0.ErrIncompressible
 	}

+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
 	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
 	b.output = wr.out

 	// Maybe even add a bigger margin.


@@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 	}
 }

-// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
 // The slice content can be arbitrary data.
 func WithDecoderDictRaw(id uint32, content []byte) DOption {
 	return func(o *decoderOptions) error {


@@ -1,10 +1,13 @@
 package zstd

 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"sort"

 	"github.com/klauspost/compress/huff0"
 )
@@ -14,7 +17,6 @@ type dict struct {
 	litEnc              *huff0.Scratch
 	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder

 	offsets [3]int
 	content []byte
 }
@@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
 	d, err := loadDict(b)
 	return d, err
 }
type BuildDictOptions struct {
// Dictionary ID.
ID uint32
// Content to use to create dictionary tables.
Contents [][]byte
// History to use for all blocks.
History []byte
// Offsets to use.
Offsets [3]int
// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
// See https://github.com/facebook/zstd/issues/3724
CompatV155 bool
// Use the specified encoder level.
// The dictionary will be built using the specified encoder level,
// which will reflect speed and make the dictionary tailored for that level.
// If not set SpeedBestCompression will be used.
Level EncoderLevel
// DebugOut will write stats and other details here if set.
DebugOut io.Writer
}
func BuildDict(o BuildDictOptions) ([]byte, error) {
initPredefined()
hist := o.History
contents := o.Contents
debug := o.DebugOut != nil
println := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintln(o.DebugOut, args...)
}
}
printf := func(s string, args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintf(o.DebugOut, s, args...)
}
}
print := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprint(o.DebugOut, args...)
}
}
if int64(len(hist)) > dictMaxLength {
return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
}
if len(hist) < 8 {
return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
}
if len(contents) == 0 {
return nil, errors.New("no content provided")
}
d := dict{
id: o.ID,
litEnc: nil,
llDec: sequenceDec{},
ofDec: sequenceDec{},
mlDec: sequenceDec{},
offsets: o.Offsets,
content: hist,
}
block := blockEnc{lowMem: false}
block.init()
enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
if o.Level != 0 {
eOpts := encoderOptions{
level: o.Level,
blockSize: maxMatchLen,
windowSize: maxMatchLen,
dict: &d,
lowMem: false,
}
enc = eOpts.encoder()
} else {
o.Level = SpeedBestCompression
}
var (
remain [256]int
ll [256]int
ml [256]int
of [256]int
)
addValues := func(dst *[256]int, src []byte) {
for _, v := range src {
dst[v]++
}
}
addHist := func(dst *[256]int, src *[256]uint32) {
for i, v := range src {
dst[i] += int(v)
}
}
seqs := 0
nUsed := 0
litTotal := 0
newOffsets := make(map[uint32]int, 1000)
for _, b := range contents {
block.reset(nil)
if len(b) < 8 {
continue
}
nUsed++
enc.Reset(&d, true)
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
addHist(&ml, block.coders.mlEnc.Histogram())
addHist(&of, block.coders.ofEnc.Histogram())
for i, seq := range block.sequences {
if i > 3 {
break
}
offset := seq.offset
if offset == 0 {
continue
}
if offset > 3 {
newOffsets[offset-3]++
} else {
newOffsets[uint32(o.Offsets[offset-1])]++
}
}
}
// Find most used offsets.
var sortedOffsets []uint32
for k := range newOffsets {
sortedOffsets = append(sortedOffsets, k)
}
sort.Slice(sortedOffsets, func(i, j int) bool {
a, b := sortedOffsets[i], sortedOffsets[j]
if a == b {
// Prefer the longer offset
return sortedOffsets[i] > sortedOffsets[j]
}
return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
})
if len(sortedOffsets) > 3 {
if debug {
print("Offsets:")
for i, v := range sortedOffsets {
if i > 20 {
break
}
printf("[%d: %d],", v, newOffsets[v])
}
println("")
}
sortedOffsets = sortedOffsets[:3]
}
for i, v := range sortedOffsets {
o.Offsets[i] = int(v)
}
if debug {
println("New repeat offsets", o.Offsets)
}
if nUsed == 0 || seqs == 0 {
return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
}
if debug {
println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
}
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
var maxSym uint8
var maxCount int
var fakeLength int
for i, v := range src {
if v > 0 {
v = v / nUsed
if v == 0 {
v = 1
}
}
if v > maxCount {
maxCount = v
}
if v != 0 {
maxSym = uint8(i)
}
fakeLength += v
hist[i] = uint32(v)
}
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
err := dst.normalizeCount(fakeLength)
if err != nil {
return nil, err
}
if debug {
println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
}
return dst.writeCount(nil)
}
if debug {
print("Literal lengths: ")
}
llTable, err := copyHist(block.coders.llEnc, &ll)
if err != nil {
return nil, err
}
if debug {
print("Match lengths: ")
}
mlTable, err := copyHist(block.coders.mlEnc, &ml)
if err != nil {
return nil, err
}
if debug {
print("Offsets: ")
}
ofTable, err := copyHist(block.coders.ofEnc, &of)
if err != nil {
return nil, err
}
// Literal table
avgSize := litTotal
if avgSize > huff0.BlockSizeMax/2 {
avgSize = huff0.BlockSizeMax / 2
}
huffBuff := make([]byte, 0, avgSize)
// Target size
div := litTotal / avgSize
if div < 1 {
div = 1
}
if debug {
println("Huffman weights:")
}
for i, n := range remain[:] {
if n > 0 {
n = n / div
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
if debug {
printf("[%d: %d], ", i, n)
}
}
}
if o.CompatV155 && remain[255]/div == 0 {
huffBuff = append(huffBuff, 255)
}
scratch := &huff0.Scratch{TableLog: 11}
for tries := 0; tries < 255; tries++ {
scratch = &huff0.Scratch{TableLog: 11}
_, _, err = huff0.Compress1X(huffBuff, scratch)
if err == nil {
break
}
if debug {
printf("Try %d: Huffman error: %v\n", tries+1, err)
}
huffBuff = huffBuff[:0]
if tries == 250 {
if debug {
println("Huffman: Bailing out with predefined table")
}
// Bail out.... Just generate something
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
for i := 0; i < 128; i++ {
huffBuff = append(huffBuff, byte(i))
}
continue
}
if errors.Is(err, huff0.ErrIncompressible) {
// Try truncating least common.
for i, n := range remain[:] {
if n > 0 {
n = n / (div * (i + 1))
if n > 0 {
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
huffBuff = append(huffBuff, 255)
}
if len(huffBuff) == 0 {
huffBuff = append(huffBuff, 0, 255)
}
}
if errors.Is(err, huff0.ErrUseRLE) {
for i, n := range remain[:] {
n = n / (div * (i + 1))
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
var out bytes.Buffer
out.Write([]byte(dictMagic))
out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
out.Write(scratch.OutTable)
if debug {
println("huff table:", len(scratch.OutTable), "bytes")
println("of table:", len(ofTable), "bytes")
println("ml table:", len(mlTable), "bytes")
println("ll table:", len(llTable), "bytes")
}
out.Write(ofTable)
out.Write(mlTable)
out.Write(llTable)
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
out.Write(hist)
if debug {
_, err := loadDict(out.Bytes())
if err != nil {
panic(err)
}
i, err := InspectDictionary(out.Bytes())
if err != nil {
panic(err)
}
println("ID:", i.ID())
println("Content size:", i.ContentSize())
println("Encoder:", i.LitEncoder() != nil)
println("Offsets:", i.Offsets())
var totalSize int
for _, b := range contents {
totalSize += len(b)
}
encWith := func(opts ...EOption) int {
enc, err := NewWriter(nil, opts...)
if err != nil {
panic(err)
}
defer enc.Close()
var dst []byte
var totalSize int
for _, b := range contents {
dst = enc.EncodeAll(b, dst[:0])
totalSize += len(dst)
}
return totalSize
}
plain := encWith(WithEncoderLevel(o.Level))
withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
println("Input size:", totalSize)
println("Plain Compressed:", plain)
println("Dict Compressed:", withDict)
println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
}
return out.Bytes(), nil
}
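This vendored update also pulls in the experimental dictionary builder added upstream in v1.17.0. Below is a minimal usage sketch under assumed sample data; the `trainDict` helper and the sample payloads are illustrative, while `BuildDict`, `BuildDictOptions`, `WithEncoderDict`, and `SpeedBestCompression` are the upstream API shown in the code above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// trainDict is an illustrative helper: it builds a dictionary from sample
// payloads and returns the serialized dictionary bytes.
func trainDict(samples [][]byte) ([]byte, error) {
	return zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,                    // dictionary ID to embed in the output
		Contents: samples,                 // blocks used to build the tables
		History:  samples[len(samples)-1], // shared history; must be at least 8 bytes
		Offsets:  [3]int{1, 4, 8},         // starting repeat offsets (zstd defaults)
		Level:    zstd.SpeedBestCompression,
	})
}

func main() {
	// Repetitive samples so the builder can find sequences to model.
	samples := [][]byte{
		[]byte(`{"event":"login","user":"alice","ok":true,"source":"web"}`),
		[]byte(`{"event":"logout","user":"bob","ok":true,"source":"web"}`),
		[]byte(`{"event":"login","user":"carol","ok":false,"source":"api"}`),
	}
	dict, err := trainDict(samples)
	if err != nil {
		panic(err)
	}
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println("compressed with dict:", len(enc.EncodeAll(samples[0], nil)), "bytes")
}
```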


@@ -144,6 +144,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
 	} else {
 		e.crc.Reset()
 	}
+	e.blk.dictLitEnc = nil
 	if d != nil {
 		low := e.lowMem
 		if singleBlock {


@@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
 	if m.rep < 0 {
 		ofc = ofCode(uint32(m.s-m.offset) + 3)
 	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
 	}
 	// Cost, excluding
 	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -197,12 +197,13 @@ encodeLoop:
 		// Set m to a match at offset if it looks like that will improve compression.
 		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
 				return
 			}
 			if debugAsserts {
-				if offset <= 0 {
-					panic(offset)
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
 				}
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
@@ -226,7 +227,7 @@ encodeLoop:
 				}
 			}
 			l := 4 + e.matchlen(s+4, offset+4, src)
-			if rep < 0 {
+			if true {
 				// Extend candidate match backwards as far as possible.
 				tMin := s - e.maxMatchOff
 				if tMin < 0 {
@@ -281,6 +282,7 @@ encodeLoop:
 		// Load next and check...
 		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
 		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1

 		// Look far ahead, unless we have a really long match already...
 		if best.length < goodEnough {
@@ -343,8 +345,8 @@ encodeLoop:
 		if best.rep > 0 {
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
-			if debugAsserts && s <= nextEmit {
-				panic("s <= nextEmit")
+			if debugAsserts && s < nextEmit {
+				panic("s < nextEmit")
 			}
 			addLiterals(&seq, best.s)
@@ -356,19 +358,16 @@ encodeLoop:
 			blk.sequences = append(blk.sequences, seq)

 			// Index old s + 1 -> s - 1
-			index0 := s + 1
 			s = best.s + best.length
 			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
-				}
-				break encodeLoop
-			}
+
 			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
 			off := index0 + e.cur
-			for index0 < s {
+			for index0 < end {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -377,6 +376,7 @@ encodeLoop:
 				off++
 				index0++
 			}
+
 			switch best.rep {
 			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
@@ -385,12 +385,17 @@ encodeLoop:
 			case 4 | 3:
 				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
 			continue
 		}

 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		index0 := s + 1
 		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
@@ -418,19 +423,25 @@ encodeLoop:
 		}
 		blk.sequences = append(blk.sequences, seq)
 		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
-		}

-		// Index old s + 1 -> s - 1
-		for index0 < s {
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
+		}
+
+		off := index0 + e.cur
+		for index0 < end {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
+			off++
+		}
+
+		if s >= sLimit {
+			break encodeLoop
 		}
 	}


@@ -145,7 +145,7 @@ encodeLoop:
 	var t int32
 	// We allow the encoder to optionally turn off repeat offsets across blocks
 	canRepeat := len(blk.sequences) > 2
-	var matched int32
+	var matched, index0 int32

 	for {
 		if debugAsserts && canRepeat && offset1 == 0 {
@@ -162,6 +162,7 @@ encodeLoop:
 		off := s + e.cur
 		e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
 		e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+		index0 = s + 1

 		if canRepeat {
 			if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -258,7 +259,6 @@ encodeLoop:
 				}
 				blk.sequences = append(blk.sequences, seq)

-				index0 := s + repOff2
 				s += lenght + repOff2
 				nextEmit = s
 				if s >= sLimit {
@@ -498,15 +498,15 @@ encodeLoop:
 		}

 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
+			off += 2
 		}

 		cv = load6432(src, s)
@@ -672,7 +672,7 @@ encodeLoop:
 	var t int32
 	// We allow the encoder to optionally turn off repeat offsets across blocks
 	canRepeat := len(blk.sequences) > 2
-	var matched int32
+	var matched, index0 int32

 	for {
 		if debugAsserts && canRepeat && offset1 == 0 {
@@ -691,6 +691,7 @@ encodeLoop:
 		e.markLongShardDirty(nextHashL)
 		e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
 		e.markShortShardDirty(nextHashS)
+		index0 = s + 1

 		if canRepeat {
 			if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -726,7 +727,6 @@ encodeLoop:
 				blk.sequences = append(blk.sequences, seq)

 				// Index match start+1 (long) -> s - 1
-				index0 := s + repOff
 				s += lenght + repOff
 				nextEmit = s
@@ -790,7 +790,6 @@ encodeLoop:
 				}
 				blk.sequences = append(blk.sequences, seq)

-				index0 := s + repOff2
 				s += lenght + repOff2
 				nextEmit = s
 				if s >= sLimit {
@@ -1024,18 +1023,18 @@ encodeLoop:
 		}

 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
 			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
+			off += 2
 		}

 		cv = load6432(src, s)


@@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			}
 		}
 		e.lastDictID = d.id
-		e.allDirty = true
+		allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff


@@ -133,8 +133,7 @@ encodeLoop:
 		if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 			// Consider history as well.
 			var seq seq
-			var length int32
-			length = 4 + e.matchlen(s+6, repIndex+4, src)
+			length := 4 + e.matchlen(s+6, repIndex+4, src)

 			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
@@ -645,8 +644,7 @@ encodeLoop:
 		if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 			// Consider history as well.
 			var seq seq
-			var length int32
-			length = 4 + e.matchlen(s+6, repIndex+4, src)
+			length := 4 + e.matchlen(s+6, repIndex+4, src)

 			seq.matchLen = uint32(length - zstdMinMatch)
@@ -831,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}

 	if true {
 		end := e.maxMatchOff + int32(len(d.content)) - 8
-		for i := e.maxMatchOff; i < end; i += 3 {
+		for i := e.maxMatchOff; i < end; i += 2 {
 			const hashLog = tableBits

 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
-			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
-			nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
+			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
 			e.dictTable[nextHash] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
@@ -846,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 				val:    uint32(cv >> 8),
 				offset: i + 1,
 			}
-			e.dictTable[nextHash2] = tableEntry{
-				val:    uint32(cv >> 16),
-				offset: i + 2,
-			}
 		}
 	}
 	e.lastDictID = d.id


@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			DictID:        e.o.dict.ID(),
 		}

-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 			Checksum:    false,
 			DictID:      0,
 		}
-		dst, _ = fh.appendTo(dst)
+		dst = fh.appendTo(dst)

 		// Write raw block as last one only.
 		var blk blockHeader
@@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)

 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)


@@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption {
 		}
 		// No need to waste our time.
 		if n == 1 {
-			o.pad = 0
+			n = 0
 		}
 		if n > 1<<30 {
 			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
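This change makes `WithEncoderPadding(1)` behave as "no padding" again without clobbering the stored option. For context, a hedged usage sketch (the 4 KiB padding value is just an example):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Pad every frame up to a multiple of 4 KiB; per the fix above,
	// WithEncoderPadding(1) is now normalized to "no padding".
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderPadding(4096))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	frame := enc.EncodeAll([]byte("some payload"), nil)
	fmt.Println(len(frame), len(frame)%4096 == 0) // frame length is a multiple of 4096
}
```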


@@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error {
 		switch err {
 		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			signature[0] = b[0]
+		default:
+			return err
 		}

 		// Read the rest, don't allow io.ErrUnexpectedEOF
 		b, err = br.readSmall(3)
 		switch err {
 		case io.EOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			copy(signature[1:], b)
+		default:
+			return err
 		}

 		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {


@@ -22,7 +22,7 @@ type frameHeader struct {

 const maxHeaderSize = 14

-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }

 const skippableFrameHeader = 4 + 4


@ -0,0 +1,16 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int


@ -0,0 +1,68 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc && !noasm
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SARQ $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET


@ -0,0 +1,33 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
import (
"encoding/binary"
"math/bits"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}


@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
var ll, mo, ml int var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) { if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
// inlined function: // inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState) // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
// extra bits are stored in reverse order. // extra bits are stored in reverse order.
br.fill() br.fill()
if s.maxBits <= 32 {
mo += br.getBits(moB)
ml += br.getBits(mlB)
ll += br.getBits(llB)
} else {
mo += br.getBits(moB) mo += br.getBits(moB)
if s.maxBits > 32 {
br.fill() br.fill()
}
// matchlength+literal length, max 32 bits // matchlength+literal length, max 32 bits
ml += br.getBits(mlB) ml += br.getBits(mlB)
ll += br.getBits(llB) ll += br.getBits(llB)
}
mo = s.adjustOffset(mo, ll, moB) mo = s.adjustOffset(mo, ll, moB)
return return
} }
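Editor's note: the restructuring above removes the duplicated branch — the offset extra bits are always read first, the bit buffer is refilled only when the combined extra-bit count can exceed 32, and the match-length plus literal-length extras (bounded at 32 bits together by the comment) are read last. A tiny worked check of that budget, using assumed worst-case extra-bit widths from the zstd format:

```go
package main

import "fmt"

func main() {
	// Assumed worst cases: offset extras <= 31 bits, match-length extras <= 16,
	// literal-length extras <= 16 (the "max 32 bits" comment in the diff).
	const ofMax, mlMax, llMax = 31, 16, 16
	fmt.Println(ofMax + mlMax + llMax) // 63: can exceed 32, so a refill may be needed
	fmt.Println(mlMax+llMax <= 32)     // true: the trailing reads always fit after one refill
}
```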

View file

@ -5,11 +5,11 @@
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV // Requires: CMOV
TEXT ·sequenceDecs_decode_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), AX MOVQ br+8(FP), CX
MOVQ 32(AX), DX MOVQ 24(CX), DX
MOVBQZX 40(AX), BX MOVBQZX 32(CX), BX
MOVQ 24(AX), SI MOVQ (CX), AX
MOVQ (AX), AX MOVQ 8(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
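Editor's note: the register shuffles in this and the following hunks all encode one change — the fields the assembly reads from the bitReader moved. Loads at offsets 32/40/24 become 24/32/8, offset 8 is the length word of a Go slice header, and the prologue now adds len(in) rather than a separate cursor to the base pointer. A hypothetical reconstruction of the two layouts; field names are guesses, only the byte offsets are taken from the code:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Assumed layouts on a 64-bit platform; the offsets are what the assembly dereferences.
type bitReaderOld struct {
	in       []byte // ptr 0, len 8, cap 16
	off      uint   // 24: read cursor (gone in the new code)
	value    uint64 // 32
	bitsRead uint8  // 40
}

type bitReaderNew struct {
	in       []byte // ptr 0, len 8, cap 16 — len doubles as "bytes left"
	value    uint64 // 24
	bitsRead uint8  // 32
}

func main() {
	var o bitReaderOld
	var n bitReaderNew
	fmt.Println(unsafe.Offsetof(o.off), unsafe.Offsetof(o.value), unsafe.Offsetof(o.bitsRead)) // 24 32 40
	fmt.Println(unsafe.Offsetof(n.value), unsafe.Offsetof(n.bitsRead))                         // 24 32
}
```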
@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R12, 152(AX) MOVQ R12, 152(AX)
MOVQ R13, 160(AX) MOVQ R13, 160(AX)
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 32(AX) MOVQ DX, 24(AX)
MOVB BL, 40(AX) MOVB BL, 32(AX)
MOVQ SI, 24(AX) MOVQ SI, 8(AX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -336,11 +336,11 @@ error_overread:
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV // Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), AX MOVQ br+8(FP), CX
MOVQ 32(AX), DX MOVQ 24(CX), DX
MOVBQZX 40(AX), BX MOVBQZX 32(CX), BX
MOVQ 24(AX), SI MOVQ (CX), AX
MOVQ (AX), AX MOVQ 8(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R12, 152(AX) MOVQ R12, 152(AX)
MOVQ R13, 160(AX) MOVQ R13, 160(AX)
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 32(AX) MOVQ DX, 24(AX)
MOVB BL, 40(AX) MOVB BL, 32(AX)
MOVQ SI, 24(AX) MOVQ SI, 8(AX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -638,11 +638,11 @@ error_overread:
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), CX MOVQ br+8(FP), BX
MOVQ 32(CX), AX MOVQ 24(BX), AX
MOVBQZX 40(CX), DX MOVBQZX 32(BX), DX
MOVQ 24(CX), BX MOVQ (BX), CX
MOVQ (CX), CX MOVQ 8(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R11, 152(CX) MOVQ R11, 152(CX)
MOVQ R12, 160(CX) MOVQ R12, 160(CX)
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 32(CX) MOVQ AX, 24(CX)
MOVB DL, 40(CX) MOVB DL, 32(CX)
MOVQ BX, 24(CX) MOVQ BX, 8(CX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -927,11 +927,11 @@ error_overread:
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), CX MOVQ br+8(FP), BX
MOVQ 32(CX), AX MOVQ 24(BX), AX
MOVBQZX 40(CX), DX MOVBQZX 32(BX), DX
MOVQ 24(CX), BX MOVQ (BX), CX
MOVQ (CX), CX MOVQ 8(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R11, 152(CX) MOVQ R11, 152(CX)
MOVQ R12, 160(CX) MOVQ R12, 160(CX)
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 32(CX) MOVQ AX, 24(CX)
MOVB DL, 40(CX) MOVB DL, 32(CX)
MOVQ BX, 24(CX) MOVQ BX, 8(CX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -1797,11 +1797,11 @@ empty_seqs:
// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: CMOV, SSE // Requires: CMOV, SSE
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), AX MOVQ br+8(FP), CX
MOVQ 32(AX), DX MOVQ 24(CX), DX
MOVBQZX 40(AX), BX MOVBQZX 32(CX), BX
MOVQ 24(AX), SI MOVQ (CX), AX
MOVQ (AX), AX MOVQ 8(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2295,9 +2295,9 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 32(AX) MOVQ DX, 24(AX)
MOVB BL, 40(AX) MOVB BL, 32(AX)
MOVQ SI, 24(AX) MOVQ SI, 8(AX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2362,11 +2362,11 @@ error_not_enough_space:
// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: BMI, BMI2, CMOV, SSE // Requires: BMI, BMI2, CMOV, SSE
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), CX MOVQ br+8(FP), BX
MOVQ 32(CX), AX MOVQ 24(BX), AX
MOVBQZX 40(CX), DX MOVBQZX 32(BX), DX
MOVQ 24(CX), BX MOVQ (BX), CX
MOVQ (CX), CX MOVQ 8(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -2818,9 +2818,9 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 32(CX) MOVQ AX, 24(CX)
MOVB DL, 40(CX) MOVB DL, 32(CX)
MOVQ BX, 24(CX) MOVQ BX, 8(CX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2885,11 +2885,11 @@ error_not_enough_space:
// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: CMOV, SSE // Requires: CMOV, SSE
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), AX MOVQ br+8(FP), CX
MOVQ 32(AX), DX MOVQ 24(CX), DX
MOVBQZX 40(AX), BX MOVBQZX 32(CX), BX
MOVQ 24(AX), SI MOVQ (CX), AX
MOVQ (AX), AX MOVQ 8(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -3485,9 +3485,9 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 32(AX) MOVQ DX, 24(AX)
MOVB BL, 40(AX) MOVB BL, 32(AX)
MOVQ SI, 24(AX) MOVQ SI, 8(AX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -3552,11 +3552,11 @@ error_not_enough_space:
// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: BMI, BMI2, CMOV, SSE // Requires: BMI, BMI2, CMOV, SSE
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), CX MOVQ br+8(FP), BX
MOVQ 32(CX), AX MOVQ 24(BX), AX
MOVBQZX 40(CX), DX MOVBQZX 32(BX), DX
MOVQ 24(CX), BX MOVQ (BX), CX
MOVQ (CX), CX MOVQ 8(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -4110,9 +4110,9 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 32(CX) MOVQ AX, 24(CX)
MOVB DL, 40(CX) MOVB DL, 32(CX)
MOVQ BX, 24(CX) MOVQ BX, 8(CX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX

View file

@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
} }
for i := range seqs { for i := range seqs {
var ll, mo, ml int var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) { if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
// inlined function: // inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState) // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

View file

@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
var written int64 var written int64
var readHeader bool var readHeader bool
{ {
var header []byte header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
var n int
header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
var n int
n, r.err = w.Write(header) n, r.err = w.Write(header)
if r.err != nil { if r.err != nil {
return written, r.err return written, r.err

View file

@ -9,7 +9,6 @@ import (
"errors" "errors"
"log" "log"
"math" "math"
"math/bits"
) )
// enable debug printing // enable debug printing
@ -106,27 +105,6 @@ func printf(format string, a ...interface{}) {
} }
} }
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
} }

4
vendor/modules.txt
View file

@ -241,8 +241,8 @@ github.com/inconshreveable/mousetrap
# github.com/jmespath/go-jmespath v0.4.0 # github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14 ## explicit; go 1.14
github.com/jmespath/go-jmespath github.com/jmespath/go-jmespath
# github.com/klauspost/compress v1.16.5 # github.com/klauspost/compress v1.17.4
## explicit; go 1.18 ## explicit; go 1.19
github.com/klauspost/compress github.com/klauspost/compress
github.com/klauspost/compress/fse github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0