
Commit

General code cleanup (#813)
klauspost committed Apr 29, 2023
1 parent 2cf14c4 commit 343440d
Showing 15 changed files with 16 additions and 70 deletions.
5 changes: 2 additions & 3 deletions flate/deflate.go
@@ -90,9 +90,8 @@ type advancedState struct {
ii uint16 // position of last match, intended to overflow to reset.

// input window: unprocessed data is window[index:windowEnd]
-index          int
-estBitsPerByte int
-hashMatch      [maxMatchLength + minMatchLength]uint32
+index     int
+hashMatch [maxMatchLength + minMatchLength]uint32

// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
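The hashHead/hashPrev comment above describes deflate's hash-chain index. As a rough sketch only, with simplified names and types rather than the package's actual fields, inserting a new position into such a chain looks like this:

```go
package main

import "fmt"

// chains is a simplified stand-in for the deflate hash-chain state:
// hashHead[h] holds the most recent input position whose hash is h, and
// hashPrev[pos] links that position to the previous one with the same hash.
type chains struct {
	hashHead map[uint32]int
	hashPrev []int
}

func (c *chains) insert(h uint32, pos int) {
	c.hashPrev[pos] = c.hashHead[h] // remember the old head
	c.hashHead[h] = pos             // the new position becomes the head
}

func main() {
	c := &chains{hashHead: map[uint32]int{}, hashPrev: make([]int, 8)}
	c.insert(42, 1)
	c.insert(42, 5)
	// Walking hash 42 newest-first visits position 5, then position 1.
	fmt.Println(c.hashHead[42], c.hashPrev[5]) // 5 1
}
```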
5 changes: 0 additions & 5 deletions flate/huffman_bit_writer.go
@@ -34,11 +34,6 @@ const (
// Should preferably be a multiple of 6, since
// we accumulate 6 bytes between writes to the buffer.
bufferFlushSize = 246

-// bufferSize is the actual output byte buffer size.
-// It must have additional headroom for a flush
-// which can contain up to 8 bytes.
-bufferSize = bufferFlushSize + 8
)

// Minimum length code that emits bits.
19 changes: 0 additions & 19 deletions flate/huffman_sortByFreq.go
@@ -42,25 +42,6 @@ func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
}
}

-// siftDownByFreq implements the heap property on data[lo, hi).
-// first is an offset into the array where the root of the heap lies.
-func siftDownByFreq(data []literalNode, lo, hi, first int) {
-	root := lo
-	for {
-		child := 2*root + 1
-		if child >= hi {
-			break
-		}
-		if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
-			child++
-		}
-		if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
-			return
-		}
-		data[first+root], data[first+child] = data[first+child], data[first+root]
-		root = child
-	}
-}
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
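The compound boolean conditions in the removed siftDownByFreq all express one ordering: compare by frequency and break ties by literal value. As a hedged restatement of that predicate (literalNode's fields are assumed from the surrounding code, not copied from it):

```go
package main

import "fmt"

// literalNode is assumed to carry a literal and its frequency, mirroring the
// removed code; the real definition lives elsewhere in the flate package.
type literalNode struct {
	literal uint16
	freq    uint16
}

// lessByFreq reports whether a sorts before b: lower frequency first, literal
// value as tiebreak. The long conditions inside siftDownByFreq spell this
// comparison out inline against parent and child elements.
func lessByFreq(a, b literalNode) bool {
	if a.freq == b.freq {
		return a.literal < b.literal
	}
	return a.freq < b.freq
}

func main() {
	a := literalNode{literal: 'a', freq: 3}
	b := literalNode{literal: 'b', freq: 3}
	fmt.Println(lessByFreq(a, b), lessByFreq(b, a)) // true false: tie broken by literal
}
```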
8 changes: 0 additions & 8 deletions huff0/bitwriter.go
@@ -13,14 +13,6 @@ type bitWriter struct {
out []byte
}

-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */

// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
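addBits16Clean relies on the caller passing a value with no stray high bits, which is why a mask table such as the removed bitMask16 is not needed on this path. A minimal sketch of that style of bit accumulation, simplified from, but not identical to, the package's writer:

```go
package main

import "fmt"

// sketchWriter mimics an LSB-first bit accumulator. Because the caller of
// addBits16Clean guarantees value has no bits set above the requested count,
// the value can be OR-ed in directly without masking it first.
type sketchWriter struct {
	bitContainer uint64 // bits are packed from the low end upward
	nBits        uint8  // number of bits currently held
}

func (w *sketchWriter) addBits16Clean(value uint16, bits uint8) {
	w.bitContainer |= uint64(value) << (w.nBits & 63)
	w.nBits += bits
}

func main() {
	var w sketchWriter
	w.addBits16Clean(0b101, 3) // caller promises only the low 3 bits are set
	w.addBits16Clean(0b11, 2)
	fmt.Printf("%b %d\n", w.bitContainer, w.nBits) // 11101 5
}
```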
2 changes: 1 addition & 1 deletion huff0/decompress.go
@@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {

switch d.actualTableLog {
case 8:
-const shift = 8 - 8
+const shift = 0
for br.off >= 4 {
br.fillFast()
v := dt[uint8(br.value>>(56+shift))]
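The 8 - 8 spelled out the intent: when actualTableLog is 8, the next symbol's table index is simply the top 8 bits of the 64-bit bit buffer, so the extra shift on top of 56 is zero. A small worked example of pulling out those top bits (values are illustrative, not taken from the decoder):

```go
package main

import "fmt"

func main() {
	// With an 8-bit table, the decode index is the top byte of the 64-bit
	// container: shift by 56, plus an extra shift of 8-8 = 0.
	const tableLog = 8
	const shift = 8 - tableLog // the constant the cleanup folds to 0
	value := uint64(0xABCD_EF01_2345_6789)
	idx := uint8(value >> (56 + shift))
	fmt.Printf("%#x\n", idx) // 0xab, the topmost 8 bits
}
```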
12 changes: 0 additions & 12 deletions internal/snapref/encode_other.go
@@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int {
return i + 2
}

-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}

func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
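The hash function kept above is a plain multiplicative hash: multiply by an odd constant and keep only the top bits via the shift. A short usage sketch (the table size here is illustrative, chosen only to show how the shift relates to the number of buckets):

```go
package main

import "fmt"

func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	// For a table of 1<<14 buckets, shift away all but the top 14 bits of
	// the 32-bit product: shift = 32 - 14 = 18.
	const tableBits = 14
	const shift = 32 - tableBits
	h := hash(0x12345678, shift)
	fmt.Println(h, h < 1<<tableBits) // the bucket index always fits the table
}
```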
4 changes: 2 additions & 2 deletions s2/cmd/internal/readahead/reader.go
@@ -293,8 +293,8 @@ func (a *reader) initBuffers(rd io.Reader, buffers [][]byte, size int) {
a.in = rd
a.ready = make(chan *buffer, len(buffers))
a.reuse = make(chan *buffer, len(buffers))
-a.exit = make(chan struct{}, 0)
-a.exited = make(chan struct{}, 0)
+a.exit = make(chan struct{})
+a.exited = make(chan struct{})
a.buffers = len(buffers)
a.size = size
a.cur = nil
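make(chan struct{}, 0) and make(chan struct{}) both create an unbuffered channel, so dropping the explicit zero capacity changes nothing at runtime. A quick standalone check:

```go
package main

import "fmt"

func main() {
	a := make(chan struct{}, 0) // explicit zero capacity
	b := make(chan struct{})    // idiomatic spelling of the same thing
	fmt.Println(cap(a), cap(b)) // 0 0: both channels are unbuffered
}
```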
1 change: 0 additions & 1 deletion s2/encode_all.go
@@ -742,7 +742,6 @@ searchDict:
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>8, tableBits)
-candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s - 1)
cv = load64(src, s)
3 changes: 0 additions & 3 deletions s2/encode_better.go
@@ -157,7 +157,6 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
index0 := base + 1
index1 := s - 2

-cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@@ -599,7 +598,6 @@ searchDict:
if s >= sLimit {
break searchDict
}
-cv = load64(src, s)
// Index in-between
index0 := base + 1
index1 := s - 2
@@ -961,7 +959,6 @@ searchDict:
index0 := base + 1
index1 := s - 2

-cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
2 changes: 1 addition & 1 deletion s2/writer.go
@@ -771,7 +771,7 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
}

var index []byte
-if w.err(nil) == nil && w.writer != nil {
+if w.err(err) == nil && w.writer != nil {
// Create index.
if idx {
compSize := int64(-1)
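Passing the freshly returned error into w.err before checking it reads like the usual sticky-error idiom: record the first failure, then always report whatever is latched. A hedged sketch of that idiom (the real Writer's version is concurrency-safe and more involved):

```go
package main

import (
	"errors"
	"fmt"
)

// writerState sketches a sticky-error holder; treat it as illustration only,
// not as the s2.Writer implementation.
type writerState struct {
	firstErr error
}

// err records e if it is the first failure seen and returns whatever error is
// currently latched. Calling w.err(err) rather than w.err(nil) ensures the
// error just produced is latched before the nil check, as in the fix above.
func (w *writerState) err(e error) error {
	if w.firstErr == nil && e != nil {
		w.firstErr = e
	}
	return w.firstErr
}

func main() {
	var w writerState
	fmt.Println(w.err(nil))                       // <nil>: nothing recorded yet
	fmt.Println(w.err(errors.New("boom")))        // boom: first error is latched
	fmt.Println(w.err(errors.New("later error"))) // still boom
}
```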
8 changes: 2 additions & 6 deletions zip/reader.go
@@ -715,12 +715,8 @@ func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
func toValidName(name string) string {
name = strings.ReplaceAll(name, `\`, `/`)
p := path.Clean(name)
-if strings.HasPrefix(p, "/") {
-	p = p[len("/"):]
-}
-for strings.HasPrefix(p, "../") {
-	p = p[len("../"):]
-}
+p = strings.TrimPrefix(p, "/")
+p = strings.TrimPrefix(p, "../")
return p
}

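strings.TrimPrefix strips the prefix once when it is present and otherwise returns the string unchanged. A standalone walk-through of the cleaned-up steps on a made-up entry name:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Mirror the cleaned-up toValidName steps on a sample Windows-style name.
	name := strings.ReplaceAll(`\dir\..\file.txt`, `\`, `/`)
	p := path.Clean(name)            // "/file.txt": interior ".." resolved
	p = strings.TrimPrefix(p, "/")   // "file.txt": leading slash stripped
	p = strings.TrimPrefix(p, "../") // unchanged: no such prefix
	fmt.Println(p)                   // file.txt
}
```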
1 change: 1 addition & 0 deletions zip/writer.go
@@ -433,6 +433,7 @@ func min64(x, y uint64) uint64 {
return y
}

+// CreateHeaderRaw is replaced by CreateRaw.
// Deprecated: CreateHeaderRaw is replaced by CreateRaw (stdlib name).
func (w *Writer) CreateHeaderRaw(fh *FileHeader) (io.Writer, error) {
return w.CreateRaw(fh)
2 changes: 1 addition & 1 deletion zstd/decoder_options.go
@@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
}
}

-// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {
6 changes: 2 additions & 4 deletions zstd/enc_fast.go
@@ -133,8 +133,7 @@ encodeLoop:
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
-var length int32
-length = 4 + e.matchlen(s+6, repIndex+4, src)
+length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
@@ -645,8 +644,7 @@ encodeLoop:
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
-var length int32
-length = 4 + e.matchlen(s+6, repIndex+4, src)
+length := 4 + e.matchlen(s+6, repIndex+4, src)

seq.matchLen = uint32(length - zstdMinMatch)

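The var-then-assign pair collapses into a single short variable declaration; the result is identical as long as the expression already has the intended type, which here comes from e.matchlen. A generic illustration of the same simplification (the helper is a stand-in, not the encoder's method):

```go
package main

import "fmt"

// matchlen is a stand-in assumed to return int32, so the two declaration
// styles below can be compared; it is not the encoder's matchlen.
func matchlen() int32 { return 10 }

func main() {
	// Before: explicit declaration followed by assignment.
	var lengthBefore int32
	lengthBefore = 4 + matchlen()

	// After: one short variable declaration; the int32 type is inferred.
	lengthAfter := 4 + matchlen()

	fmt.Println(lengthBefore, lengthAfter) // 14 14
}
```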
8 changes: 4 additions & 4 deletions zstd/framedec.go
@@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error {
switch err {
case io.EOF, io.ErrUnexpectedEOF:
return io.EOF
-default:
-return err
case nil:
signature[0] = b[0]
+default:
+return err
}
// Read the rest, don't allow io.ErrUnexpectedEOF
b, err = br.readSmall(3)
switch err {
case io.EOF:
return io.EOF
-default:
-return err
case nil:
copy(signature[1:], b)
+default:
+return err
}

if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
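In a Go switch the position of the default clause does not affect which case is chosen, so moving it after case nil is purely a readability change. A tiny standalone check of the same shape (not the frame decoder itself):

```go
package main

import (
	"fmt"
	"io"
)

// classify mirrors the reordered switch shape: default can sit anywhere in
// the case list without changing which branch runs for a given error.
func classify(err error) string {
	switch err {
	case io.EOF, io.ErrUnexpectedEOF:
		return "eof"
	case nil:
		return "ok"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(nil), classify(io.EOF), classify(io.ErrShortWrite)) // ok eof other
}
```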
