Skip to content

Commit

Permalink
Fix a few unnecessary type conversions (#339)
Browse files — browse the repository at this point in the history
This PR gets rid of a bunch of unneeded type conversions. I was checking out the code and
saw a few of these myself, but most of them were fixed using unconvert.
  • Branch information
Jacalz committed Mar 19, 2021
1 parent 4fd183f commit 81c3975
Show file tree
Hide file tree
Showing 13 changed files with 32 additions and 32 deletions.
2 changes: 1 addition & 1 deletion flate/fast_encoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ func matchLen(a, b []byte) int {
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
return int(i) + checked
return i + checked
}
}
return len(a) + checked
Expand Down
2 changes: 1 addition & 1 deletion flate/level2.go
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {

// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, int32(i))
x := load6432(src, i)
nextHash := hash4u(uint32(x), bTableBits)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one
Expand Down
10 changes: 5 additions & 5 deletions fse/compress.go
Original file line number Diff line number Diff line change
Expand Up @@ -301,7 +301,7 @@ func (s *Scratch) writeCount() error {
out[outP+1] = byte(bitStream >> 8)
outP += (bitCount + 7) / 8

if uint16(charnum) > s.symbolLen {
if charnum > s.symbolLen {
return errors.New("internal error: charnum > s.symbolLen")
}
s.Out = out[:outP]
Expand Down Expand Up @@ -331,7 +331,7 @@ type cTable struct {
func (s *Scratch) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
if cap(s.ct.tableSymbol) < int(tableSize) {
if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
Expand Down Expand Up @@ -565,8 +565,8 @@ func (s *Scratch) normalizeCount2() error {
distributed uint32
total = uint32(s.br.remain())
tableLog = s.actualTableLog
lowThreshold = uint32(total >> tableLog)
lowOne = uint32((total * 3) >> (tableLog + 1))
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
Expand All @@ -591,7 +591,7 @@ func (s *Scratch) normalizeCount2() error {

if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = uint32((total * 3) / (toDistribute * 2))
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
Expand Down
4 changes: 2 additions & 2 deletions fse/decompress.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ type decSymbol struct {
// allocDtable will allocate decoding tables if they are not big enough.
func (s *Scratch) allocDtable() {
tableSize := 1 << s.actualTableLog
if cap(s.decTable) < int(tableSize) {
if cap(s.decTable) < tableSize {
s.decTable = make([]decSymbol, tableSize)
}
s.decTable = s.decTable[:tableSize]
Expand Down Expand Up @@ -340,7 +340,7 @@ type decoder struct {
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
d.dt = dt
d.br = in
d.state = uint16(in.getBits(tableLog))
d.state = in.getBits(tableLog)
}

// next returns the next symbol and sets the next state.
Expand Down
4 changes: 2 additions & 2 deletions huff0/compress.go
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,7 @@ func (s *Scratch) buildCTable() error {
var startNode = int16(s.symbolLen)
nonNullRank := s.symbolLen - 1

nodeNb := int16(startNode)
nodeNb := startNode
huffNode := s.nodes[1 : huffNodesLen+1]

// This overlays the slice above, but allows "-1" index lookups.
Expand Down Expand Up @@ -580,7 +580,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {

// Get pos of last (smallest) symbol per rank
{
currentNbBits := uint8(maxNbBits)
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits {
continue
Expand Down
2 changes: 1 addition & 1 deletion s2/s2.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
return c>>15 | c<<17 + 0xa282ead8
}

// literalExtraSize returns the extra size of encoding n literals.
Expand Down
2 changes: 1 addition & 1 deletion snappy/snappy.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,5 +94,5 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
return c>>15 | c<<17 + 0xa282ead8
}
4 changes: 2 additions & 2 deletions zip/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -374,8 +374,8 @@ parseExtras:

const ticksPerSecond = 1e7 // Windows timestamp resolution
ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
secs := int64(ts / ticksPerSecond)
nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
secs := ts / ticksPerSecond
nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
modified = time.Unix(epoch.Unix()+secs, nsecs)
}
Expand Down
2 changes: 1 addition & 1 deletion zip/reader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1047,7 +1047,7 @@ func TestIssue12449(t *testing.T) {
0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
}
// Read in the archive.
_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
_, err := NewReader(bytes.NewReader(data), int64(len(data)))
if err != nil {
t.Errorf("Error reading the archive: %v", err)
}
Expand Down
2 changes: 1 addition & 1 deletion zip/zip_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -597,7 +597,7 @@ func testZip64(t testing.TB, size int64) *rleBuffer {
}

// read back zip file and check that we get to the end of it
r, err := NewReader(buf, int64(buf.Size()))
r, err := NewReader(buf, buf.Size())
if err != nil {
t.Fatal("reader:", err)
}
Expand Down
16 changes: 8 additions & 8 deletions zstd/encoder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ func TestEncoder_EncodeAllSimple(t *testing.T) {
defer dec.Close()

in = append(in, in...)
for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
e, err := NewWriter(nil, WithEncoderLevel(level), WithEncoderConcurrency(2), WithWindowSize(128<<10), WithZeroFrames(true))
if err != nil {
Expand Down Expand Up @@ -79,7 +79,7 @@ func TestEncoder_EncodeAllConcurrent(t *testing.T) {
t.Fatal(err)
}
defer dec.Close()
for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
rng := rand.New(rand.NewSource(0x1337))
e, err := NewWriter(nil, WithEncoderLevel(level), WithZeroFrames(true))
Expand Down Expand Up @@ -131,7 +131,7 @@ func TestEncoder_EncodeAllEncodeXML(t *testing.T) {
in = in[:10000]
}

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
e, err := NewWriter(nil, WithEncoderLevel(level))
if err != nil {
Expand Down Expand Up @@ -174,7 +174,7 @@ func TestEncoderRegression(t *testing.T) {
if testing.Short() {
testWindowSizes = []int{1 << 20}
}
for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
for _, windowSize := range testWindowSizes {
t.Run(fmt.Sprintf("window:%d", windowSize), func(t *testing.T) {
Expand Down Expand Up @@ -260,7 +260,7 @@ func TestEncoder_EncodeAllTwain(t *testing.T) {
}
defer dec.Close()

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
for _, windowSize := range testWindowSizes {
t.Run(fmt.Sprintf("window:%d", windowSize), func(t *testing.T) {
Expand Down Expand Up @@ -306,7 +306,7 @@ func TestEncoder_EncodeAllPi(t *testing.T) {
}
defer dec.Close()

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
for _, windowSize := range testWindowSizes {
t.Run(fmt.Sprintf("window:%d", windowSize), func(t *testing.T) {
Expand Down Expand Up @@ -885,7 +885,7 @@ func BenchmarkEncoder_EncodeAllSimple(b *testing.B) {
b.Fatal(err)
}

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
b.Run(level.String(), func(b *testing.B) {
enc, err := NewWriter(nil, WithEncoderConcurrency(1), WithEncoderLevel(level))
if err != nil {
Expand Down Expand Up @@ -918,7 +918,7 @@ func BenchmarkEncoder_EncodeAllSimple4K(b *testing.B) {
}
in = in[:4096]

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
for level := speedNotSet + 1; level < speedLast; level++ {
b.Run(level.String(), func(b *testing.B) {
enc, err := NewWriter(nil, WithEncoderConcurrency(1), WithEncoderLevel(level))
if err != nil {
Expand Down
12 changes: 6 additions & 6 deletions zstd/fse_encoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func (s *fseEncoder) prepare() (*fseEncoder, error) {
func (s *fseEncoder) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
if cap(s.ct.tableSymbol) < int(tableSize) {
if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
Expand Down Expand Up @@ -202,13 +202,13 @@ func (s *fseEncoder) buildCTable() error {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = int16(total - 1)
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = int16(total - v)
symbolTT[i].deltaFindState = total - v
total += v
}
}
Expand Down Expand Up @@ -353,8 +353,8 @@ func (s *fseEncoder) normalizeCount2(length int) error {
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
lowThreshold = uint32(total >> tableLog)
lowOne = uint32((total * 3) >> (tableLog + 1))
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
Expand All @@ -379,7 +379,7 @@ func (s *fseEncoder) normalizeCount2(length int) error {

if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = uint32((total * 3) / (toDistribute * 2))
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
Expand Down
2 changes: 1 addition & 1 deletion zstd/snappy.go
Original file line number Diff line number Diff line change
Expand Up @@ -417,7 +417,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func snappyCRC(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
return c>>15 | c<<17 + 0xa282ead8
}

// snappyDecodedLen returns the length of the decoded block and the number of bytes
Expand Down

0 comments on commit 81c3975

Please sign in to comment.