Skip to content

Commit

Permalink
chore(ci): upgraded to go 1.22 (#3746)
Browse files Browse the repository at this point in the history
Upgrades go to 1.22 and switches to new-style for loops

---------

Co-authored-by: Julio López <1953782+julio-lopez@users.noreply.github.com>
  • Loading branch information
jkowalski and julio-lopez committed Apr 8, 2024
1 parent 67f87a5 commit 09415e0
Show file tree
Hide file tree
Showing 120 changed files with 234 additions and 372 deletions.
1 change: 1 addition & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -268,6 +268,7 @@ dev-deps:
GO111MODULE=off go get -u github.com/sqs/goreturns

test-with-coverage: export KOPIA_COVERAGE_TEST=1
test-with-coverage: export GOEXPERIMENT=nocoverageredesign
test-with-coverage: export TESTING_ACTION_EXE ?= $(TESTING_ACTION_EXE)
test-with-coverage: $(gotestsum) $(TESTING_ACTION_EXE)
$(GO_TEST) $(UNIT_TEST_RACE_FLAGS) -tags testing -count=$(REPEAT_TEST) -short -covermode=atomic -coverprofile=coverage.txt --coverpkg $(COVERAGE_PACKAGES) -timeout $(UNIT_TESTS_TIMEOUT) ./...
Expand Down
8 changes: 3 additions & 5 deletions cli/command_benchmark.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,11 +57,9 @@ func runInParallelNoResult[A any](args []A, run func(arg A)) {
func runInParallel[A any, T any](args []A, run func(arg A) T) T {
var wg sync.WaitGroup

for i := 0; i < len(args)-1; i++ {
for _, arg := range args[1:] {
wg.Add(1)

arg := args[i]

go func() {
defer wg.Done()

Expand All @@ -70,7 +68,7 @@ func runInParallel[A any, T any](args []A, run func(arg A) T) T {
}

// run one on the main goroutine and N-1 in parallel.
v := run(args[len(args)-1])
v := run(args[0])

wg.Wait()

Expand All @@ -80,7 +78,7 @@ func runInParallel[A any, T any](args []A, run func(arg A) T) T {
func makeOutputBuffers(n, capacity int) []*bytes.Buffer {
var res []*bytes.Buffer

for i := 0; i < n; i++ {
for range n {
res = append(res, bytes.NewBuffer(make([]byte, 0, capacity)))
}

Expand Down
4 changes: 2 additions & 2 deletions cli/command_benchmark_compression.go
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ func (c *commandBenchmarkCompression) runCompression(ctx context.Context, data [
input = bytes.NewReader(nil)
)

for i := 0; i < cnt; i++ {
for i := range cnt {
compressed.Reset()
input.Reset(data)

Expand Down Expand Up @@ -268,7 +268,7 @@ func (c *commandBenchmarkCompression) runDecompression(ctx context.Context, data
run := func(decompressed *bytes.Buffer) int64 {
input := bytes.NewReader(nil)

for i := 0; i < cnt; i++ {
for range cnt {
decompressed.Reset()
input.Reset(compressedInputBytes)

Expand Down
2 changes: 1 addition & 1 deletion cli/command_benchmark_crypto.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func (c *commandBenchmarkCrypto) runBenchmark(ctx context.Context) []cryptoBench
var encryptOutput gather.WriteBuffer
defer encryptOutput.Close()

for i := 0; i < hashCount; i++ {
for range hashCount {
contentID := hf(hashOutput[:0], input)

if encerr := enc.Encrypt(input, contentID, &encryptOutput); encerr != nil {
Expand Down
6 changes: 3 additions & 3 deletions cli/command_benchmark_ecc.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult
var results []eccBenchResult

data := make([]byte, c.blockSize)
for i := uint64(0); i < uint64(c.blockSize); i++ {
for i := range uint64(c.blockSize) {
data[i] = byte(i%255 + 1)
}

Expand Down Expand Up @@ -99,7 +99,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult
var tmp gather.WriteBuffer
defer tmp.Close()

for i := 0; i < repeat; i++ {
for range repeat {
if encerr := impl.Encrypt(input, nil, &tmp); encerr != nil {
log(ctx).Errorf("encoding failed: %v", encerr)
break
Expand All @@ -125,7 +125,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult
var tmp gather.WriteBuffer
defer tmp.Close()

for i := 0; i < repeat; i++ {
for range repeat {
if decerr := impl.Decrypt(input, nil, &tmp); decerr != nil {
log(ctx).Errorf("decoding failed: %v", decerr)
break
Expand Down
2 changes: 1 addition & 1 deletion cli/command_benchmark_encryption.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ func (c *commandBenchmarkEncryption) runBenchmark(ctx context.Context) []cryptoB
var encryptOutput gather.WriteBuffer
defer encryptOutput.Close()

for i := 0; i < hashCount; i++ {
for range hashCount {
if encerr := enc.Encrypt(input, hashOutput[:32], &encryptOutput); encerr != nil {
log(ctx).Errorf("encryption failed: %v", encerr)
break
Expand Down
2 changes: 1 addition & 1 deletion cli/command_benchmark_hashing.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ func (c *commandBenchmarkHashing) runBenchmark(ctx context.Context) []cryptoBenc
runInParallelNoInputNoResult(c.parallel, func() {
var hashOutput [hashing.MaxHashSize]byte

for i := 0; i < hashCount; i++ {
for range hashCount {
hf(hashOutput[:0], input)
}
})
Expand Down
5 changes: 2 additions & 3 deletions cli/command_benchmark_splitters.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func (c *commandBenchmarkSplitters) run(ctx context.Context) error { //nolint:fu

rnd := rand.New(rand.NewSource(c.randSeed)) //nolint:gosec

for i := 0; i < c.blockCount; i++ {
for range c.blockCount {
b := make([]byte, c.blockSize)
if _, err := rnd.Read(b); err != nil {
return errors.Wrap(err, "error generating random data")
Expand All @@ -85,10 +85,9 @@ func (c *commandBenchmarkSplitters) run(ctx context.Context) error { //nolint:fu

var segmentLengths []int

for _, data := range dataBlocks {
for _, d := range dataBlocks {
s := fact()

d := data
for len(d) > 0 {
n := s.NextSplitPoint(d)
if n < 0 {
Expand Down
2 changes: 1 addition & 1 deletion cli/command_blob_stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func (c *commandBlobStats) run(ctx context.Context, rep repo.DirectRepository) e

var sizeThresholds []int64

for i := 0; i < 8; i++ {
for range 8 {
sizeThresholds = append(sizeThresholds, sizeThreshold)
countMap[sizeThreshold] = 0
sizeThreshold *= 10
Expand Down
2 changes: 1 addition & 1 deletion cli/command_cache_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func (c *commandCacheSync) run(ctx context.Context, rep repo.DirectRepositoryWri
ch := make(chan blob.ID, c.parallel)

// workers that will prefetch blobs.
for i := 0; i < c.parallel; i++ {
for range c.parallel {
eg.Go(func() error {
for blobID := range ch {
if err := rep.ContentManager().MetadataCache().PrefetchBlob(ctx, blobID); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cli/command_content_stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ func (c *commandContentStats) run(ctx context.Context, rep repo.DirectRepository
sizeBuckets []uint32
)

for i := 0; i < 8; i++ {
for range 8 {
sizeBuckets = append(sizeBuckets, sizeThreshold)
sizeThreshold *= 10
}
Expand Down
2 changes: 1 addition & 1 deletion cli/command_index_inspect.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func (c *commandIndexInspect) inspectAllBlobs(ctx context.Context, rep repo.Dire

var eg errgroup.Group

for i := 0; i < c.parallel; i++ {
for range c.parallel {
eg.Go(func() error {
for bm := range indexesCh {
if err := c.inspectSingleIndexBlob(ctx, rep, bm.BlobID, output); err != nil {
Expand Down
4 changes: 1 addition & 3 deletions cli/command_index_recover.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,9 +141,7 @@ func (c *commandIndexRecover) recoverIndexesFromAllPacks(ctx context.Context, re
})

// N goroutines to recover from incoming blobs.
for i := 0; i < c.parallel; i++ {
worker := i

for worker := range c.parallel {
eg.Go(func() error {
cnt := 0

Expand Down
1 change: 0 additions & 1 deletion cli/command_policy_set_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -452,7 +452,6 @@ func TestSetSchedulingPolicyFromFlags(t *testing.T) {
expChangeCount: 0,
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
changeCount := 0

Expand Down
4 changes: 1 addition & 3 deletions cli/command_repository_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -229,9 +229,7 @@ func (c *commandRepositorySyncTo) runSyncBlobs(ctx context.Context, src blob.Rea

tt := timetrack.Start()

for i := 0; i < c.repositorySyncParallelism; i++ {
workerID := i

for workerID := range c.repositorySyncParallelism {
eg.Go(func() error {
for m := range copyCh {
log(ctx).Debugf("[%v] Copying %v (%v bytes)...\n", workerID, m.BlobID, m.Length)
Expand Down
4 changes: 1 addition & 3 deletions cli/command_snapshot_fix_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -286,8 +286,6 @@ func TestSnapshotFix(t *testing.T) {
}

for _, tc := range cases {
tc := tc

t.Run(tc.name, func(t *testing.T) {
runner := testenv.NewInProcRunner(t)
env := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner)
Expand Down Expand Up @@ -434,7 +432,7 @@ func mustWriteFileWithRepeatedData(t *testing.T, fname string, repeat int, data

defer f.Close()

for i := 0; i < repeat; i++ {
for range repeat {
_, err := f.Write(data)
require.NoError(t, err)
}
Expand Down
2 changes: 1 addition & 1 deletion cli/password.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ func (c *App) getPasswordFromFlags(ctx context.Context, isCreate, allowPersisten

// askPass presents a given prompt and asks the user for password.
func askPass(out io.Writer, prompt string) (string, error) {
for i := 0; i < 5; i++ {
for range 5 {
fmt.Fprint(out, prompt)

passBytes, err := term.ReadPassword(int(os.Stdin.Fd()))
Expand Down
2 changes: 1 addition & 1 deletion fs/cachefs/cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func (cs *cacheSource) setEntryCount(id string, cnt int) {

var fakeEntry fs.Entry

for i := 0; i < cnt; i++ {
for range cnt {
fakeEntries = append(fakeEntries, fakeEntry)
}

Expand Down
1 change: 0 additions & 1 deletion fs/ignorefs/ignorefs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -496,7 +496,6 @@ var cases = []struct {

func TestIgnoreFS(t *testing.T) {
for _, tc := range cases {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
root := setupFilesystem(tc.skipDefaultFiles)
originalFiles := walkTree(t, root)
Expand Down
2 changes: 1 addition & 1 deletion fs/localfs/local_fs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ func TestIterateNonExistent(t *testing.T) {
func testIterate(t *testing.T, nFiles int) {
tmp := testutil.TempDirectory(t)

for i := 0; i < nFiles; i++ {
for i := range nFiles {
assertNoError(t, os.WriteFile(filepath.Join(tmp, fmt.Sprintf("f%v", i)), []byte{1, 2, 3}, 0o777))
}

Expand Down
4 changes: 2 additions & 2 deletions fs/localfs/localfs_benchmark_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,15 @@ func benchmarkReadDirWithCount(b *testing.B, fileCount int) {

td := b.TempDir()

for i := 0; i < fileCount; i++ {
for range fileCount {
os.WriteFile(filepath.Join(td, uuid.NewString()), []byte{1, 2, 3, 4}, 0o644)
}

b.StartTimer()

ctx := context.Background()

for i := 0; i < b.N; i++ {
for range b.N {
dir, _ := localfs.Directory(td)
fs.IterateEntries(ctx, dir, func(context.Context, fs.Entry) error {
return nil
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
module github.com/kopia/kopia

go 1.21
go 1.22

require (
cloud.google.com/go/storage v1.40.0
Expand Down
1 change: 0 additions & 1 deletion internal/auth/authz_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,6 @@ func verifyLegacyAuthorizer(ctx context.Context, t *testing.T, rep repo.Reposito
}

for _, tc := range cases {
tc := tc
t.Run(tc.usernameAtHost, func(t *testing.T) {
a := authorizer.Authorize(ctx, rep, tc.usernameAtHost)

Expand Down
16 changes: 8 additions & 8 deletions internal/bigmap/bigmap_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ func TestGrowingMap(t *testing.T) {
h := sha256.New()

// insert 20K hashes
for i := 0; i < 20000; i++ {
for i := range 20000 {
var keybuf, valbuf, valbuf2 [sha256.Size]byte

k := sha256Key(h, keybuf[:0], i)
Expand Down Expand Up @@ -110,7 +110,7 @@ func TestGrowingSet(t *testing.T) {
h := sha256.New()

// insert 20K hashes
for i := 0; i < 20000; i++ {
for i := range 20000 {
var keybuf [sha256.Size]byte

k := sha256Key(h, keybuf[:0], i)
Expand Down Expand Up @@ -175,7 +175,7 @@ func benchmarkInternalMap(b *testing.B, m *internalMap, someVal []byte) {
keyBuf [sha256.Size]byte
)

for i := 0; i < b.N; i++ {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand All @@ -187,8 +187,8 @@ func benchmarkInternalMap(b *testing.B, m *internalMap, someVal []byte) {

valBuf := make([]byte, 10)

for j := 0; j < 4; j++ {
for i := 0; i < b.N; i++ {
for range 4 {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand Down Expand Up @@ -223,7 +223,7 @@ func benchmarkSyncMap(b *testing.B, someVal []byte) {

b.ResetTimer()

for i := 0; i < b.N; i++ {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand All @@ -233,8 +233,8 @@ func benchmarkSyncMap(b *testing.B, someVal []byte) {
m.Store(string(key), append([]byte{}, someVal...))
}

for j := 0; j < 4; j++ {
for i := 0; i < b.N; i++ {
for range 4 {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand Down
8 changes: 4 additions & 4 deletions internal/bigmap/bigmap_map_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func TestGrowingMap(t *testing.T) {
h := sha256.New()

// insert 20K hashes
for i := 0; i < 20000; i++ {
for i := range 20000 {
var keybuf, valbuf, valbuf2 [sha256.Size]byte

k := sha256Key(h, keybuf[:0], i)
Expand Down Expand Up @@ -102,7 +102,7 @@ func benchmarkMap(b *testing.B, m *bigmap.Map, someVal []byte) {
keyBuf [sha256.Size]byte
)

for i := 0; i < b.N; i++ {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand All @@ -114,8 +114,8 @@ func benchmarkMap(b *testing.B, m *bigmap.Map, someVal []byte) {

valBuf := make([]byte, 10)

for j := 0; j < 4; j++ {
for i := 0; i < b.N; i++ {
for range 4 {
for i := range b.N {
// generate key=sha256(i) without allocations.
h.Reset()
binary.LittleEndian.PutUint64(num[:], uint64(i))
Expand Down

0 comments on commit 09415e0

Please sign in to comment.