diff --git a/Makefile b/Makefile index 6bf95cb308..4f54c06bf7 100644 --- a/Makefile +++ b/Makefile @@ -268,6 +268,7 @@ dev-deps: GO111MODULE=off go get -u github.com/sqs/goreturns test-with-coverage: export KOPIA_COVERAGE_TEST=1 +test-with-coverage: export GOEXPERIMENT=nocoverageredesign test-with-coverage: export TESTING_ACTION_EXE ?= $(TESTING_ACTION_EXE) test-with-coverage: $(gotestsum) $(TESTING_ACTION_EXE) $(GO_TEST) $(UNIT_TEST_RACE_FLAGS) -tags testing -count=$(REPEAT_TEST) -short -covermode=atomic -coverprofile=coverage.txt --coverpkg $(COVERAGE_PACKAGES) -timeout $(UNIT_TESTS_TIMEOUT) ./... diff --git a/cli/command_benchmark.go b/cli/command_benchmark.go index eb991d717e..9706f8b59e 100644 --- a/cli/command_benchmark.go +++ b/cli/command_benchmark.go @@ -57,11 +57,9 @@ func runInParallelNoResult[A any](args []A, run func(arg A)) { func runInParallel[A any, T any](args []A, run func(arg A) T) T { var wg sync.WaitGroup - for i := 0; i < len(args)-1; i++ { + for _, arg := range args[1:] { wg.Add(1) - arg := args[i] - go func() { defer wg.Done() @@ -70,7 +68,7 @@ func runInParallel[A any, T any](args []A, run func(arg A) T) T { } // run one on the main goroutine and N-1 in parallel. - v := run(args[len(args)-1]) + v := run(args[0]) wg.Wait() @@ -80,7 +78,7 @@ func runInParallel[A any, T any](args []A, run func(arg A) T) T { func makeOutputBuffers(n, capacity int) []*bytes.Buffer { var res []*bytes.Buffer - for i := 0; i < n; i++ { + for range n { res = append(res, bytes.NewBuffer(make([]byte, 0, capacity))) } diff --git a/cli/command_benchmark_compression.go b/cli/command_benchmark_compression.go index b6e12ac43b..e6e05c1149 100644 --- a/cli/command_benchmark_compression.go +++ b/cli/command_benchmark_compression.go @@ -186,7 +186,7 @@ func (c *commandBenchmarkCompression) runCompression(ctx context.Context, data [ input = bytes.NewReader(nil) ) - for i := 0; i < cnt; i++ { + for i := range cnt { compressed.Reset() input.Reset(data) @@ -268,7 +268,7 @@ func (c *commandBenchmarkCompression) runDecompression(ctx context.Context, data run := func(decompressed *bytes.Buffer) int64 { input := bytes.NewReader(nil) - for i := 0; i < cnt; i++ { + for range cnt { decompressed.Reset() input.Reset(compressedInputBytes) diff --git a/cli/command_benchmark_crypto.go b/cli/command_benchmark_crypto.go index 4efd169da3..468f412c18 100644 --- a/cli/command_benchmark_crypto.go +++ b/cli/command_benchmark_crypto.go @@ -97,7 +97,7 @@ func (c *commandBenchmarkCrypto) runBenchmark(ctx context.Context) []cryptoBench var encryptOutput gather.WriteBuffer defer encryptOutput.Close() - for i := 0; i < hashCount; i++ { + for range hashCount { contentID := hf(hashOutput[:0], input) if encerr := enc.Encrypt(input, contentID, &encryptOutput); encerr != nil { diff --git a/cli/command_benchmark_ecc.go b/cli/command_benchmark_ecc.go index 0daf4a853e..473ddea637 100644 --- a/cli/command_benchmark_ecc.go +++ b/cli/command_benchmark_ecc.go @@ -70,7 +70,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult var results []eccBenchResult data := make([]byte, c.blockSize) - for i := uint64(0); i < uint64(c.blockSize); i++ { + for i := range uint64(c.blockSize) { data[i] = byte(i%255 + 1) } @@ -99,7 +99,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult var tmp gather.WriteBuffer defer tmp.Close() - for i := 0; i < repeat; i++ { + for range repeat { if encerr := impl.Encrypt(input, nil, &tmp); encerr != nil { log(ctx).Errorf("encoding failed: %v", encerr) break 
@@ -125,7 +125,7 @@ func (c *commandBenchmarkEcc) runBenchmark(ctx context.Context) []eccBenchResult var tmp gather.WriteBuffer defer tmp.Close() - for i := 0; i < repeat; i++ { + for range repeat { if decerr := impl.Decrypt(input, nil, &tmp); decerr != nil { log(ctx).Errorf("decoding failed: %v", decerr) break diff --git a/cli/command_benchmark_encryption.go b/cli/command_benchmark_encryption.go index faac73fb20..6301005891 100644 --- a/cli/command_benchmark_encryption.go +++ b/cli/command_benchmark_encryption.go @@ -89,7 +89,7 @@ func (c *commandBenchmarkEncryption) runBenchmark(ctx context.Context) []cryptoB var encryptOutput gather.WriteBuffer defer encryptOutput.Close() - for i := 0; i < hashCount; i++ { + for range hashCount { if encerr := enc.Encrypt(input, hashOutput[:32], &encryptOutput); encerr != nil { log(ctx).Errorf("encryption failed: %v", encerr) break diff --git a/cli/command_benchmark_hashing.go b/cli/command_benchmark_hashing.go index ff0acf8ab9..5744d2c397 100644 --- a/cli/command_benchmark_hashing.go +++ b/cli/command_benchmark_hashing.go @@ -81,7 +81,7 @@ func (c *commandBenchmarkHashing) runBenchmark(ctx context.Context) []cryptoBenc runInParallelNoInputNoResult(c.parallel, func() { var hashOutput [hashing.MaxHashSize]byte - for i := 0; i < hashCount; i++ { + for range hashCount { hf(hashOutput[:0], input) } }) diff --git a/cli/command_benchmark_splitters.go b/cli/command_benchmark_splitters.go index b5b9abcf7f..13a76f4731 100644 --- a/cli/command_benchmark_splitters.go +++ b/cli/command_benchmark_splitters.go @@ -66,7 +66,7 @@ func (c *commandBenchmarkSplitters) run(ctx context.Context) error { //nolint:fu rnd := rand.New(rand.NewSource(c.randSeed)) //nolint:gosec - for i := 0; i < c.blockCount; i++ { + for range c.blockCount { b := make([]byte, c.blockSize) if _, err := rnd.Read(b); err != nil { return errors.Wrap(err, "error generating random data") @@ -85,10 +85,9 @@ func (c *commandBenchmarkSplitters) run(ctx context.Context) error { //nolint:fu var segmentLengths []int - for _, data := range dataBlocks { + for _, d := range dataBlocks { s := fact() - d := data for len(d) > 0 { n := s.NextSplitPoint(d) if n < 0 { diff --git a/cli/command_blob_stats.go b/cli/command_blob_stats.go index fccf225692..bcce121895 100644 --- a/cli/command_blob_stats.go +++ b/cli/command_blob_stats.go @@ -34,7 +34,7 @@ func (c *commandBlobStats) run(ctx context.Context, rep repo.DirectRepository) e var sizeThresholds []int64 - for i := 0; i < 8; i++ { + for range 8 { sizeThresholds = append(sizeThresholds, sizeThreshold) countMap[sizeThreshold] = 0 sizeThreshold *= 10 diff --git a/cli/command_cache_sync.go b/cli/command_cache_sync.go index b9c1f7e579..b714cdd5d9 100644 --- a/cli/command_cache_sync.go +++ b/cli/command_cache_sync.go @@ -27,7 +27,7 @@ func (c *commandCacheSync) run(ctx context.Context, rep repo.DirectRepositoryWri ch := make(chan blob.ID, c.parallel) // workers that will prefetch blobs.
- for i := 0; i < c.parallel; i++ { + for range c.parallel { eg.Go(func() error { for blobID := range ch { if err := rep.ContentManager().MetadataCache().PrefetchBlob(ctx, blobID); err != nil { diff --git a/cli/command_content_stats.go b/cli/command_content_stats.go index b280688cd0..60585e36c7 100644 --- a/cli/command_content_stats.go +++ b/cli/command_content_stats.go @@ -38,7 +38,7 @@ func (c *commandContentStats) run(ctx context.Context, rep repo.DirectRepository sizeBuckets []uint32 ) - for i := 0; i < 8; i++ { + for range 8 { sizeBuckets = append(sizeBuckets, sizeThreshold) sizeThreshold *= 10 } diff --git a/cli/command_index_inspect.go b/cli/command_index_inspect.go index e25c6db44b..948b4673c9 100644 --- a/cli/command_index_inspect.go +++ b/cli/command_index_inspect.go @@ -91,7 +91,7 @@ func (c *commandIndexInspect) inspectAllBlobs(ctx context.Context, rep repo.Dire var eg errgroup.Group - for i := 0; i < c.parallel; i++ { + for range c.parallel { eg.Go(func() error { for bm := range indexesCh { if err := c.inspectSingleIndexBlob(ctx, rep, bm.BlobID, output); err != nil { diff --git a/cli/command_index_recover.go b/cli/command_index_recover.go index 6eba78a2af..c0a73cbca6 100644 --- a/cli/command_index_recover.go +++ b/cli/command_index_recover.go @@ -141,9 +141,7 @@ func (c *commandIndexRecover) recoverIndexesFromAllPacks(ctx context.Context, re }) // N goroutines to recover from incoming blobs. - for i := 0; i < c.parallel; i++ { - worker := i - + for worker := range c.parallel { eg.Go(func() error { cnt := 0 diff --git a/cli/command_policy_set_test.go b/cli/command_policy_set_test.go index efe1c0c216..04e63a1478 100644 --- a/cli/command_policy_set_test.go +++ b/cli/command_policy_set_test.go @@ -452,7 +452,6 @@ func TestSetSchedulingPolicyFromFlags(t *testing.T) { expChangeCount: 0, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { changeCount := 0 diff --git a/cli/command_repository_sync.go b/cli/command_repository_sync.go index c66105d3c4..47448be333 100644 --- a/cli/command_repository_sync.go +++ b/cli/command_repository_sync.go @@ -229,9 +229,7 @@ func (c *commandRepositorySyncTo) runSyncBlobs(ctx context.Context, src blob.Rea tt := timetrack.Start() - for i := 0; i < c.repositorySyncParallelism; i++ { - workerID := i - + for workerID := range c.repositorySyncParallelism { eg.Go(func() error { for m := range copyCh { log(ctx).Debugf("[%v] Copying %v (%v bytes)...\n", workerID, m.BlobID, m.Length) diff --git a/cli/command_snapshot_fix_test.go b/cli/command_snapshot_fix_test.go index 9f3d2c0e81..f57d54e2a6 100644 --- a/cli/command_snapshot_fix_test.go +++ b/cli/command_snapshot_fix_test.go @@ -286,8 +286,6 @@ func TestSnapshotFix(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { runner := testenv.NewInProcRunner(t) env := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) @@ -434,7 +432,7 @@ func mustWriteFileWithRepeatedData(t *testing.T, fname string, repeat int, data defer f.Close() - for i := 0; i < repeat; i++ { + for range repeat { _, err := f.Write(data) require.NoError(t, err) } diff --git a/cli/password.go b/cli/password.go index d7540f841e..fab110f17b 100644 --- a/cli/password.go +++ b/cli/password.go @@ -94,7 +94,7 @@ func (c *App) getPasswordFromFlags(ctx context.Context, isCreate, allowPersisten // askPass presents a given prompt and asks the user for password. 
func askPass(out io.Writer, prompt string) (string, error) { - for i := 0; i < 5; i++ { + for range 5 { fmt.Fprint(out, prompt) passBytes, err := term.ReadPassword(int(os.Stdin.Fd())) diff --git a/fs/cachefs/cache_test.go b/fs/cachefs/cache_test.go index f0e48068d3..cad706fe6f 100644 --- a/fs/cachefs/cache_test.go +++ b/fs/cachefs/cache_test.go @@ -46,7 +46,7 @@ func (cs *cacheSource) setEntryCount(id string, cnt int) { var fakeEntry fs.Entry - for i := 0; i < cnt; i++ { + for range cnt { fakeEntries = append(fakeEntries, fakeEntry) } diff --git a/fs/ignorefs/ignorefs_test.go b/fs/ignorefs/ignorefs_test.go index 8a02071416..6dec83aa55 100644 --- a/fs/ignorefs/ignorefs_test.go +++ b/fs/ignorefs/ignorefs_test.go @@ -496,7 +496,6 @@ var cases = []struct { func TestIgnoreFS(t *testing.T) { for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { root := setupFilesystem(tc.skipDefaultFiles) originalFiles := walkTree(t, root) diff --git a/fs/localfs/local_fs_test.go b/fs/localfs/local_fs_test.go index 0c66eb71cf..4584881913 100644 --- a/fs/localfs/local_fs_test.go +++ b/fs/localfs/local_fs_test.go @@ -157,7 +157,7 @@ func TestIterateNonExistent(t *testing.T) { func testIterate(t *testing.T, nFiles int) { tmp := testutil.TempDirectory(t) - for i := 0; i < nFiles; i++ { + for i := range nFiles { assertNoError(t, os.WriteFile(filepath.Join(tmp, fmt.Sprintf("f%v", i)), []byte{1, 2, 3}, 0o777)) } diff --git a/fs/localfs/localfs_benchmark_test.go b/fs/localfs/localfs_benchmark_test.go index d185b002c3..cdfaf241e5 100644 --- a/fs/localfs/localfs_benchmark_test.go +++ b/fs/localfs/localfs_benchmark_test.go @@ -47,7 +47,7 @@ func benchmarkReadDirWithCount(b *testing.B, fileCount int) { td := b.TempDir() - for i := 0; i < fileCount; i++ { + for range fileCount { os.WriteFile(filepath.Join(td, uuid.NewString()), []byte{1, 2, 3, 4}, 0o644) } @@ -55,7 +55,7 @@ func benchmarkReadDirWithCount(b *testing.B, fileCount int) { ctx := context.Background() - for i := 0; i < b.N; i++ { + for range b.N { dir, _ := localfs.Directory(td) fs.IterateEntries(ctx, dir, func(context.Context, fs.Entry) error { return nil diff --git a/go.mod b/go.mod index fd3e0e07fa..9ea64db2c5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kopia/kopia -go 1.21 +go 1.22 require ( cloud.google.com/go/storage v1.40.0 diff --git a/internal/auth/authz_test.go b/internal/auth/authz_test.go index 931c7ab753..bbba593cd3 100644 --- a/internal/auth/authz_test.go +++ b/internal/auth/authz_test.go @@ -168,7 +168,6 @@ func verifyLegacyAuthorizer(ctx context.Context, t *testing.T, rep repo.Reposito } for _, tc := range cases { - tc := tc t.Run(tc.usernameAtHost, func(t *testing.T) { a := authorizer.Authorize(ctx, rep, tc.usernameAtHost) diff --git a/internal/bigmap/bigmap_internal_test.go b/internal/bigmap/bigmap_internal_test.go index 6c3b868a7d..2b1bdd90ea 100644 --- a/internal/bigmap/bigmap_internal_test.go +++ b/internal/bigmap/bigmap_internal_test.go @@ -67,7 +67,7 @@ func TestGrowingMap(t *testing.T) { h := sha256.New() // insert 20K hashes - for i := 0; i < 20000; i++ { + for i := range 20000 { var keybuf, valbuf, valbuf2 [sha256.Size]byte k := sha256Key(h, keybuf[:0], i) @@ -110,7 +110,7 @@ func TestGrowingSet(t *testing.T) { h := sha256.New() // insert 20K hashes - for i := 0; i < 20000; i++ { + for i := range 20000 { var keybuf [sha256.Size]byte k := sha256Key(h, keybuf[:0], i) @@ -175,7 +175,7 @@ func benchmarkInternalMap(b *testing.B, m *internalMap, someVal []byte) { keyBuf [sha256.Size]byte ) - for i := 
0; i < b.N; i++ { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) @@ -187,8 +187,8 @@ func benchmarkInternalMap(b *testing.B, m *internalMap, someVal []byte) { valBuf := make([]byte, 10) - for j := 0; j < 4; j++ { - for i := 0; i < b.N; i++ { + for range 4 { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) @@ -223,7 +223,7 @@ func benchmarkSyncMap(b *testing.B, someVal []byte) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) @@ -233,8 +233,8 @@ func benchmarkSyncMap(b *testing.B, someVal []byte) { m.Store(string(key), append([]byte{}, someVal...)) } - for j := 0; j < 4; j++ { - for i := 0; i < b.N; i++ { + for range 4 { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) diff --git a/internal/bigmap/bigmap_map_test.go b/internal/bigmap/bigmap_map_test.go index b72d7bd5b2..eab826ebcb 100644 --- a/internal/bigmap/bigmap_map_test.go +++ b/internal/bigmap/bigmap_map_test.go @@ -28,7 +28,7 @@ func TestGrowingMap(t *testing.T) { h := sha256.New() // insert 20K hashes - for i := 0; i < 20000; i++ { + for i := range 20000 { var keybuf, valbuf, valbuf2 [sha256.Size]byte k := sha256Key(h, keybuf[:0], i) @@ -102,7 +102,7 @@ func benchmarkMap(b *testing.B, m *bigmap.Map, someVal []byte) { keyBuf [sha256.Size]byte ) - for i := 0; i < b.N; i++ { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) @@ -114,8 +114,8 @@ func benchmarkMap(b *testing.B, m *bigmap.Map, someVal []byte) { valBuf := make([]byte, 10) - for j := 0; j < 4; j++ { - for i := 0; i < b.N; i++ { + for range 4 { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) diff --git a/internal/bigmap/bigmap_set_test.go b/internal/bigmap/bigmap_set_test.go index 12bada2326..d913df531b 100644 --- a/internal/bigmap/bigmap_set_test.go +++ b/internal/bigmap/bigmap_set_test.go @@ -28,7 +28,7 @@ func TestGrowingSet(t *testing.T) { h := sha256.New() // insert 20K hashes - for i := 0; i < 20000; i++ { + for i := range 20000 { var keybuf [sha256.Size]byte k := sha256Key(h, keybuf[:0], i) @@ -63,7 +63,7 @@ func BenchmarkSet(b *testing.B) { keyBuf [sha256.Size]byte ) - for i := 0; i < b.N; i++ { + for i := range b.N { // generate key=sha256(i) without allocations. h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) @@ -73,8 +73,8 @@ func BenchmarkSet(b *testing.B) { m.Put(ctx, key) } - for j := 0; j < 4; j++ { - for i := 0; i < b.N; i++ { + for range 4 { + for i := range b.N { // generate key=sha256(i) without allocations. 
h.Reset() binary.LittleEndian.PutUint64(num[:], uint64(i)) diff --git a/internal/bigmap/bigmapbench/main.go b/internal/bigmap/bigmapbench/main.go index d2594f47b8..ab3f57495d 100644 --- a/internal/bigmap/bigmapbench/main.go +++ b/internal/bigmap/bigmapbench/main.go @@ -79,7 +79,7 @@ func main() { t0 := clock.Now() - for i := 0; i < 300_000_000; i++ { + for i := range 300_000_000 { if i%1_000_000 == 0 && i > 0 { var ms runtime.MemStats diff --git a/internal/blobtesting/concurrent.go b/internal/blobtesting/concurrent.go index a7ed2e3fcd..482bfd46b1 100644 --- a/internal/blobtesting/concurrent.go +++ b/internal/blobtesting/concurrent.go @@ -40,7 +40,7 @@ func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAcc // generate random blob IDs for the pool var blobs []blob.ID - for i := 0; i < options.NumBlobs; i++ { + for range options.NumBlobs { blobIDBytes := make([]byte, 32) cryptorand.Read(blobIDBytes) blobs = append(blobs, blob.ID(hex.EncodeToString(blobIDBytes))) @@ -53,12 +53,12 @@ func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAcc eg, ctx := errgroup.WithContext(testlogging.Context(t)) // start readers that will be reading random blob out of the pool - for i := 0; i < options.Getters; i++ { + for range options.Getters { eg.Go(func() error { var data gather.WriteBuffer defer data.Close() - for i := 0; i < options.Iterations; i++ { + for range options.Iterations { blobID := randomBlobID() offset := int64(0) length := int64(-1) @@ -88,9 +88,9 @@ func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAcc } // start putters that will be writing random blob out of the pool - for i := 0; i < options.Putters; i++ { + for range options.Putters { eg.Go(func() error { - for i := 0; i < options.Iterations; i++ { + for range options.Iterations { blobID := randomBlobID() data := fmt.Sprintf("%v-%v", blobID, rand.Int63()) err := st.PutBlob(ctx, blobID, gather.FromSlice([]byte(data)), blob.PutOptions{}) @@ -104,9 +104,9 @@ func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAcc } // start deleters that will be deleting random blob out of the pool - for i := 0; i < options.Deleters; i++ { + for range options.Deleters { eg.Go(func() error { - for i := 0; i < options.Iterations; i++ { + for range options.Iterations { blobID := randomBlobID() err := st.DeleteBlob(ctx, blobID) switch { @@ -126,9 +126,9 @@ func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAcc } // start listers that will be listing blobs by random prefixes of existing objects. - for i := 0; i < options.Listers; i++ { + for range options.Listers { eg.Go(func() error { - for i := 0; i < options.Iterations; i++ { + for range options.Iterations { blobID := randomBlobID() prefix := blobID[0:rand.Intn(len(blobID))] if rand.Intn(100) < options.NonExistentListPrefixPercentage { diff --git a/internal/blobtesting/verify.go b/internal/blobtesting/verify.go index 7cf334f412..8c24bf6ffd 100644 --- a/internal/blobtesting/verify.go +++ b/internal/blobtesting/verify.go @@ -34,8 +34,6 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. // First verify that blocks don't exist. t.Run("VerifyBlobsNotFound", func(t *testing.T) { for _, b := range blocks { - b := b - t.Run(string(b.blk), func(t *testing.T) { t.Parallel() @@ -57,9 +55,7 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. // Now add blocks. 
t.Run("AddBlobs", func(t *testing.T) { for _, b := range blocks { - for i := 0; i < initialAddConcurrency; i++ { - b := b - + for i := range initialAddConcurrency { t.Run(fmt.Sprintf("%v-%v", b.blk, i), func(t *testing.T) { t.Parallel() @@ -73,8 +69,6 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. t.Run("GetBlobs", func(t *testing.T) { for _, b := range blocks { - b := b - t.Run(string(b.blk), func(t *testing.T) { t.Parallel() @@ -112,8 +106,6 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. newContents := []byte{99} for _, b := range blocks { - b := b - t.Run(string(b.blk), func(t *testing.T) { t.Parallel() err := r.PutBlob(ctx, b.blk, gather.FromSlice(newContents), opts) @@ -150,8 +142,6 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. t.Run("PutBlobsWithSetTime", func(t *testing.T) { for _, b := range blocks { - b := b - t.Run(string(b.blk), func(t *testing.T) { t.Parallel() @@ -181,8 +171,6 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob. t.Run("PutBlobsWithGetTime", func(t *testing.T) { for _, b := range blocks { - b := b - t.Run(string(b.blk), func(t *testing.T) { t.Parallel() diff --git a/internal/cache/content_cache_concurrency_test.go b/internal/cache/content_cache_concurrency_test.go index 9b42cf0993..ba8a2e460a 100644 --- a/internal/cache/content_cache_concurrency_test.go +++ b/internal/cache/content_cache_concurrency_test.go @@ -176,9 +176,7 @@ func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCac var wg sync.WaitGroup - for i := 0; i < 20; i++ { - i := i - + for i := range 20 { wg.Add(1) go func() { @@ -212,7 +210,7 @@ func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache ne defer dataCache.Close(ctx) - for i := 0; i < 100; i++ { + for i := range 100 { require.NoError(t, underlying.PutBlob(ctx, blob.ID(fmt.Sprintf("blob%v", i)), gather.FromSlice([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), blob.PutOptions{})) } @@ -226,9 +224,7 @@ func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache ne var wg sync.WaitGroup - for i := 0; i < 20; i++ { - i := i - + for i := range 20 { wg.Add(1) go func() { @@ -275,7 +271,7 @@ func testGetContentRaceFetchesOnce(t *testing.T, newCache newContentCacheFunc) { var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for range 20 { wg.Add(1) go func() { diff --git a/internal/cache/content_cache_test.go b/internal/cache/content_cache_test.go index 09be374a3f..db859ca0db 100644 --- a/internal/cache/content_cache_test.go +++ b/internal/cache/content_cache_test.go @@ -326,7 +326,7 @@ func TestCacheFailureToRead(t *testing.T) { var v gather.WriteBuffer defer v.Close() - for i := 0; i < 2; i++ { + for range 2 { require.NoError(t, cc.GetContent(ctx, "aa", "content-1", 0, 3, &v)) got, want := v.ToByteSlice(), []byte{1, 2, 3} diff --git a/internal/epoch/epoch_advance_test.go b/internal/epoch/epoch_advance_test.go index bf92f78bce..d9226cdbe4 100644 --- a/internal/epoch/epoch_advance_test.go +++ b/internal/epoch/epoch_advance_test.go @@ -19,7 +19,7 @@ func TestShouldAdvanceEpoch(t *testing.T) { Timestamp: t0, Length: 1, }) - for i := 0; i < def.EpochAdvanceOnCountThreshold; i++ { + for range def.EpochAdvanceOnCountThreshold { lotsOfMetadata = append(lotsOfMetadata, blob.Metadata{ Timestamp: t0.Add(def.MinEpochDuration), Length: 1, diff --git a/internal/epoch/epoch_manager.go b/internal/epoch/epoch_manager.go index 
b22b8bc7b3..7724077b3a 100644 --- a/internal/epoch/epoch_manager.go +++ b/internal/epoch/epoch_manager.go @@ -687,7 +687,6 @@ func (e *Manager) loadUncompactedEpochs(ctx context.Context, min, max int) (map[ eg, ctx := errgroup.WithContext(ctx) for n := min; n <= max; n++ { - n := n if n < 0 { continue } @@ -995,8 +994,7 @@ func (e *Manager) getCompleteIndexSetForCommittedState(ctx context.Context, cs C tmp := make([][]blob.Metadata, cnt) - for i := 0; i < cnt; i++ { - i := i + for i := range cnt { ep := i + startEpoch eg.Go(func() error { diff --git a/internal/epoch/epoch_manager_test.go b/internal/epoch/epoch_manager_test.go index 3926da97ea..9b4c37ca60 100644 --- a/internal/epoch/epoch_manager_test.go +++ b/internal/epoch/epoch_manager_test.go @@ -152,7 +152,6 @@ func TestIndexEpochManager_Parallel(t *testing.T) { endTimeReal := clock.Now().Add(30 * time.Second) for worker := 1; worker <= 5; worker++ { - worker := worker te2 := te.another() indexNum := 1e6 * worker @@ -355,8 +354,8 @@ func TestIndexEpochManager_NoCompactionInReadOnly(t *testing.T) { // attempt to compact things and advance the epoch. We want to write exactly // the number of blobs that will cause it to advance so we can keep track of // which epoch we're on and everything. - for j := 0; j < 10; j++ { - for i := 0; i < p.GetEpochAdvanceOnCountThreshold(); i++ { + for range 10 { + for i := range p.GetEpochAdvanceOnCountThreshold() { // Advance the time so that the difference in times for writes will force // new epochs. te.ft.Advance(48 * time.Hour) @@ -430,8 +429,8 @@ func TestNoEpochAdvanceOnIndexRead(t *testing.T) { // indexes it should attempt to advance the epoch. // Write exactly the number of index blobs that will cause it to advance so // we can keep track of which one is the current epoch. - for j := 0; j < epochs; j++ { - for i := 0; i < count-1; i++ { + for range epochs { + for i := range count - 1 { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(i)) } @@ -648,7 +647,7 @@ func TestMaybeAdvanceEpoch(t *testing.T) { idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. - for i := 0; i < idxCount; i++ { + for i := range idxCount { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(i)) } @@ -722,7 +721,7 @@ func TestMaybeAdvanceEpoch_Error(t *testing.T) { idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. - for i := 0; i < idxCount; i++ { + for i := range idxCount { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(i)) } @@ -921,8 +920,8 @@ func TestMaybeCompactSingleEpoch_CompactionError(t *testing.T) { idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. - for j := 0; j < 4; j++ { - for i := 0; i < idxCount; i++ { + for range 4 { + for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force // new epochs. @@ -963,8 +962,8 @@ func TestMaybeCompactSingleEpoch(t *testing.T) { var k int // Create sufficient indexes blobs and move clock forward to advance current epoch - for j := 0; j < epochsToWrite; j++ { - for i := 0; i < idxCount; i++ { + for j := range epochsToWrite { + for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force // new epochs. 
@@ -998,7 +997,7 @@ func TestMaybeCompactSingleEpoch(t *testing.T) { // perform single-epoch compaction for settled epochs newestEpochToCompact := cs.WriteEpoch - numUnsettledEpochs + 1 - for j := 0; j < newestEpochToCompact; j++ { + for j := range newestEpochToCompact { err = te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) @@ -1085,8 +1084,8 @@ func TestMaybeGenerateRangeCheckpoint_CompactionError(t *testing.T) { var k int // Create sufficient indexes blobs and move clock forward to advance epoch. - for j := 0; j < epochsToWrite; j++ { - for i := 0; i < idxCount; i++ { + for range epochsToWrite { + for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force // new epochs. @@ -1135,8 +1134,8 @@ func TestMaybeGenerateRangeCheckpoint_FromUncompactedEpochs(t *testing.T) { epochsToWrite := p.FullCheckpointFrequency + 3 idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. - for j := 0; j < epochsToWrite; j++ { - for i := 0; i < idxCount; i++ { + for range epochsToWrite { + for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force // new epochs. @@ -1187,8 +1186,8 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { epochsToWrite := p.FullCheckpointFrequency + 3 idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. - for j := 0; j < epochsToWrite; j++ { - for i := 0; i < idxCount; i++ { + for range epochsToWrite { + for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force // new epochs. @@ -1212,7 +1211,7 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { // perform single-epoch compaction for settled epochs newestEpochToCompact := cs.WriteEpoch - numUnsettledEpochs + 1 - for j := 0; j < newestEpochToCompact; j++ { + for j := range newestEpochToCompact { err = te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) @@ -1404,7 +1403,7 @@ func TestCleanupMarkers_CleanUpManyMarkers(t *testing.T) { const epochsToAdvance = 5 te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(0)) - for i := 0; i < epochsToAdvance; i++ { + for i := range epochsToAdvance { te.ft.Advance(p.MinEpochDuration + 1*time.Hour) te.mgr.forceAdvanceEpoch(ctx) te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(i+1)) diff --git a/internal/faketime/faketime_test.go b/internal/faketime/faketime_test.go index 3d70fe3fd3..089c1f55b4 100644 --- a/internal/faketime/faketime_test.go +++ b/internal/faketime/faketime_test.go @@ -18,7 +18,7 @@ func TestFrozen(t *testing.T) { for _, tm := range times { timeNow := Frozen(tm) - for i := 0; i < 5; i++ { + for range 5 { if want, got := tm, timeNow(); got != want { t.Fatalf("Invalid frozen time, got: %v, want: %v", got, want) } @@ -40,13 +40,13 @@ func TestAutoAdvance(t *testing.T) { wg.Add(goRoutinesCount) - for i := 0; i < goRoutinesCount; i++ { + for range goRoutinesCount { go func() { defer wg.Done() times := make([]time.Time, iterations) - for j := 0; j < iterations; j++ { + for j := range iterations { times[j] = timeNow() } @@ -108,7 +108,7 @@ func TestTimeAdvanceConcurrent(t *testing.T) { wg.Add(parallelism) - for i := 0; i < parallelism; i++ { + for range parallelism { go func() { defer wg.Done() @@ -116,7 +116,7 @@ func TestTimeAdvanceConcurrent(t *testing.T) { var prev time.Time - for j := 0; 
j < iterations; j++ { + for j := range iterations { if advanceProbability > rand.Float64() { ta.Advance(17 * time.Second) } diff --git a/internal/gather/gather_write_buffer_test.go b/internal/gather/gather_write_buffer_test.go index b1208d3410..5ccfdbfc4d 100644 --- a/internal/gather/gather_write_buffer_test.go +++ b/internal/gather/gather_write_buffer_test.go @@ -113,7 +113,7 @@ func TestGatherWriteBufferMax(t *testing.T) { defer b.Close() // write 1Mx5 bytes - for i := 0; i < 1000000; i++ { + for range 1000000 { b.Append([]byte("hello")) } @@ -121,7 +121,7 @@ func TestGatherWriteBufferMax(t *testing.T) { require.Len(t, b.Bytes().Slices, 1) // write 10Mx5 bytes - for i := 0; i < 10000000; i++ { + for range 10000000 { b.Append([]byte("hello")) } diff --git a/internal/logfile/logfile_test.go b/internal/logfile/logfile_test.go index 56547ac561..a82621a67d 100644 --- a/internal/logfile/logfile_test.go +++ b/internal/logfile/logfile_test.go @@ -121,7 +121,6 @@ func TestLogFileRotation(t *testing.T) { for subdir, wantEntryCount := range subdirs { logSubdir := filepath.Join(tmpLogDir, subdir) - wantEntryCount := wantEntryCount t.Run(subdir, func(t *testing.T) { entries, err := os.ReadDir(logSubdir) @@ -177,7 +176,6 @@ func TestLogFileMaxTotalSize(t *testing.T) { for subdir, flag := range subdirFlags { logSubdir := filepath.Join(tmpLogDir, subdir) - flag := flag t.Run(subdir, func(t *testing.T) { size0 := getTotalDirSize(t, logSubdir) diff --git a/internal/metrics/metrics_distribution_test.go b/internal/metrics/metrics_distribution_test.go index d38393a309..9ff0b8ef46 100644 --- a/internal/metrics/metrics_distribution_test.go +++ b/internal/metrics/metrics_distribution_test.go @@ -13,7 +13,7 @@ func TestBucketForThresholds(t *testing.T) { assert.Equal(t, 0, bucketForThresholds(buckets, buckets[0]-1)) - for i := 0; i < n; i++ { + for i := range n { assert.Equal(t, i, bucketForThresholds(buckets, buckets[i]-1)) assert.Equal(t, i, bucketForThresholds(buckets, buckets[i])) assert.Equal(t, i+1, bucketForThresholds(buckets, buckets[i]+1), "looking for %v", buckets[i]+1) diff --git a/internal/parallelwork/parallel_work_queue.go b/internal/parallelwork/parallel_work_queue.go index 6da72824d9..da5d048782 100644 --- a/internal/parallelwork/parallel_work_queue.go +++ b/internal/parallelwork/parallel_work_queue.go @@ -63,7 +63,7 @@ func (v *Queue) Process(ctx context.Context, workers int) error { eg, ctx := errgroup.WithContext(ctx) - for i := 0; i < workers; i++ { + for range workers { eg.Go(func() error { for { select { diff --git a/internal/parallelwork/parallel_work_queue_test.go b/internal/parallelwork/parallel_work_queue_test.go index f46db288e7..38e9524480 100644 --- a/internal/parallelwork/parallel_work_queue_test.go +++ b/internal/parallelwork/parallel_work_queue_test.go @@ -176,7 +176,7 @@ func TestOnNthCompletion(t *testing.T) { onNthCompletion := parallelwork.OnNthCompletion(n, callback) // before n-th invocation - for i := 0; i < n-1; i++ { + for range n - 1 { err := onNthCompletion() require.NoError(t, err) require.Equal(t, 0, callbackInvoked) @@ -211,7 +211,7 @@ func TestOnNthCompletion(t *testing.T) { wg.Add(n + 1) - for i := 0; i < n+1; i++ { + for range n + 1 { go func() { results <- onNthCompletion() wg.Done() diff --git a/internal/providervalidation/providervalidation.go b/internal/providervalidation/providervalidation.go index 53e5d68870..5db215d3e1 100644 --- a/internal/providervalidation/providervalidation.go +++ b/internal/providervalidation/providervalidation.go @@ -459,19 +459,19 @@ 
func (c *concurrencyTest) listBlobWorker(ctx context.Context, worker int) func() func (c *concurrencyTest) run(ctx context.Context) error { eg, ctx := errgroup.WithContext(ctx) - for worker := 0; worker < c.opt.NumPutBlobWorkers; worker++ { + for worker := range c.opt.NumPutBlobWorkers { eg.Go(c.putBlobWorker(ctx, worker)) } - for worker := 0; worker < c.opt.NumGetBlobWorkers; worker++ { + for worker := range c.opt.NumGetBlobWorkers { eg.Go(c.getBlobWorker(ctx, worker)) } - for worker := 0; worker < c.opt.NumGetMetadataWorkers; worker++ { + for worker := range c.opt.NumGetMetadataWorkers { eg.Go(c.getMetadataWorker(ctx, worker)) } - for worker := 0; worker < c.opt.NumListBlobsWorkers; worker++ { + for worker := range c.opt.NumListBlobsWorkers { eg.Go(c.listBlobWorker(ctx, worker)) } diff --git a/internal/repodiag/log_manager_test.go b/internal/repodiag/log_manager_test.go index ecf83bef7e..62e83efdc3 100644 --- a/internal/repodiag/log_manager_test.go +++ b/internal/repodiag/log_manager_test.go @@ -49,7 +49,7 @@ func TestLogManager_AutoFlush(t *testing.T) { // flush happens after 4 << 20 bytes (4MB) after compression, // write ~10MB of base16 data which compresses to ~5MB and writes 1 blob - for i := 0; i < 5000; i++ { + for range 5000 { var b [1024]byte rand.Read(b[:]) diff --git a/internal/retry/retry_test.go b/internal/retry/retry_test.go index 089f0c5368..6f2f2c3924 100644 --- a/internal/retry/retry_test.go +++ b/internal/retry/retry_test.go @@ -45,7 +45,6 @@ func TestRetry(t *testing.T) { ctx := testlogging.Context(t) for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() diff --git a/internal/scrubber/scrub_sensitive.go b/internal/scrubber/scrub_sensitive.go index f67c5e1562..bacf553351 100644 --- a/internal/scrubber/scrub_sensitive.go +++ b/internal/scrubber/scrub_sensitive.go @@ -16,7 +16,7 @@ func ScrubSensitiveData(v reflect.Value) reflect.Value { case reflect.Struct: res := reflect.New(v.Type()).Elem() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { fv := v.Field(i) sf := v.Type().Field(i) diff --git a/internal/server/api_policies.go b/internal/server/api_policies.go index 337983f645..0a80f1e868 100644 --- a/internal/server/api_policies.go +++ b/internal/server/api_policies.go @@ -95,7 +95,7 @@ func handlePolicyResolve(ctx context.Context, rc requestContext) (interface{}, * now := clock.Now().Local() - for i := 0; i < req.NumUpcomingSnapshotTimes; i++ { + for range req.NumUpcomingSnapshotTimes { st, ok := resp.Effective.SchedulingPolicy.NextSnapshotTime(now, now) if !ok { break diff --git a/internal/server/api_sources_test.go b/internal/server/api_sources_test.go index 2fddfa062b..db0892ce22 100644 --- a/internal/server/api_sources_test.go +++ b/internal/server/api_sources_test.go @@ -144,7 +144,7 @@ func TestSourceRefreshesAfterPolicy(t *testing.T) { // make sure that soon after setting policy, the next snapshot time is up-to-date. 
match := false - for attempt := 0; attempt < 15; attempt++ { + for range 15 { sources = mustListSources(t, cli, &snapshot.SourceInfo{}) require.Len(t, sources, 1) require.NotNil(t, sources[0].NextSnapshotTime) diff --git a/internal/server/grpc_session.go b/internal/server/grpc_session.go index b0b4a53ee9..1c16c96f5d 100644 --- a/internal/server/grpc_session.go +++ b/internal/server/grpc_session.go @@ -114,8 +114,6 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error { lastErr := make(chan error, 1) for req, err := srv.Recv(); err == nil; req, err = srv.Recv() { - req := req - // propagate any error from the goroutines select { case err := <-lastErr: diff --git a/internal/server/server_authz_checks_test.go b/internal/server/server_authz_checks_test.go index c6a752c018..2f06499100 100644 --- a/internal/server/server_authz_checks_test.go +++ b/internal/server/server_authz_checks_test.go @@ -78,8 +78,6 @@ func TestValidateCSRFToken(t *testing.T) { ctx := context.Background() for i, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("case-%v", i), func(t *testing.T) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/somepath", http.NoBody) require.NoError(t, err) diff --git a/internal/server/server_test.go b/internal/server/server_test.go index 53171d6d2b..829d04d425 100644 --- a/internal/server/server_test.go +++ b/internal/server/server_test.go @@ -121,9 +121,6 @@ func TestServerUIAccessDeniedToRemoteUser(t *testing.T) { } for urlSuffix, wantStatus := range getUrls { - urlSuffix := urlSuffix - wantStatus := wantStatus - t.Run(urlSuffix, func(t *testing.T) { var hsr apiclient.HTTPStatusError diff --git a/internal/testutil/testutil.go b/internal/testutil/testutil.go index c1cffff3f9..429d08459b 100644 --- a/internal/testutil/testutil.go +++ b/internal/testutil/testutil.go @@ -143,8 +143,7 @@ func RunAllTestsWithParam(t *testing.T, v interface{}) { m := reflect.ValueOf(v) typ := m.Type() - for i := 0; i < typ.NumMethod(); i++ { - i := i + for i := range typ.NumMethod() { meth := typ.Method(i) if strings.HasPrefix(meth.Name, "Test") { diff --git a/internal/workshare/workshare_pool.go b/internal/workshare/workshare_pool.go index 17a8eed8a7..fee2cb821e 100644 --- a/internal/workshare/workshare_pool.go +++ b/internal/workshare/workshare_pool.go @@ -48,7 +48,7 @@ func NewPool[T any](numWorkers int) *Pool[T] { semaphore: make(chan struct{}, numWorkers), } - for i := 0; i < numWorkers; i++ { + for range numWorkers { w.wg.Add(1) go func() { diff --git a/internal/workshare/workshare_test.go b/internal/workshare/workshare_test.go index 0d168fb156..de537b48cf 100644 --- a/internal/workshare/workshare_test.go +++ b/internal/workshare/workshare_test.go @@ -22,7 +22,7 @@ func buildTree(level int) *treeNode { return n } - for i := 0; i < level; i++ { + for range level { n.children = append(n.children, buildTree(level-1)) } @@ -71,9 +71,7 @@ func computeTreeSum(workPool *workshare.Pool[*computeTreeSumRequest], n *treeNod } } - for _, req := range cs.Wait() { - twr := req - + for _, twr := range cs.Wait() { if twr.err != nil { return 0, twr.err } @@ -158,7 +156,7 @@ func BenchmarkComputeTreeSum(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { computeTreeSum(w, treeToWalk) } } diff --git a/repo/blob/rclone/rclone_storage_test.go b/repo/blob/rclone/rclone_storage_test.go index 6bcbaf3dad..b226d24904 100644 --- a/repo/blob/rclone/rclone_storage_test.go +++ b/repo/blob/rclone/rclone_storage_test.go @@ -90,7 +90,7 @@ func TestRCloneStorage(t 
*testing.T) { // trigger multiple parallel reads to ensure we're properly preventing race // described in https://github.com/kopia/kopia/issues/624 - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { var tmp gather.WriteBuffer defer tmp.Close() @@ -222,8 +222,6 @@ func TestRCloneProviders(t *testing.T) { rcloneExe := mustGetRcloneExeOrSkip(t) for name, rp := range rcloneExternalProviders { - rp := rp - opt := &rclone.Options{ RemotePath: rp, RCloneExe: rcloneExe, @@ -264,14 +262,13 @@ func TestRCloneProviders(t *testing.T) { prefix := uuid.NewString() - for i := 0; i < 10; i++ { - i := i + for i := range 10 { wg.Add(1) go func() { defer wg.Done() - for j := 0; j < 3; j++ { + for j := range 3 { assert.NoError(t, st.PutBlob(ctx, blob.ID(fmt.Sprintf("%v-%v-%v", prefix, i, j)), gather.FromSlice([]byte{1, 2, 3}), blob.PutOptions{})) } }() @@ -294,8 +291,8 @@ func TestRCloneProviders(t *testing.T) { var eg errgroup.Group - for i := 0; i < 10; i++ { - for j := 0; j < 3; j++ { + for i := range 10 { + for j := range 3 { blobID := blob.ID(fmt.Sprintf("%v-%v-%v", prefix, i, j)) eg.Go(func() error { diff --git a/repo/blob/s3/s3_storage_test.go b/repo/blob/s3/s3_storage_test.go index d26bc3bae1..63713c20d0 100644 --- a/repo/blob/s3/s3_storage_test.go +++ b/repo/blob/s3/s3_storage_test.go @@ -165,8 +165,6 @@ func TestS3StorageProviders(t *testing.T) { t.Parallel() for k, env := range providerCreds { - env := env - t.Run(k, func(t *testing.T) { opt := getProviderOptions(t, env) diff --git a/repo/blob/s3/s3_versioned.go b/repo/blob/s3/s3_versioned.go index e1b0f7624c..70537d677c 100644 --- a/repo/blob/s3/s3_versioned.go +++ b/repo/blob/s3/s3_versioned.go @@ -80,8 +80,7 @@ func (s *s3Storage) list(ctx context.Context, prefix blob.ID, onlyMatching bool, return nil } - oi := o - om := infoToVersionMetadata(s.Prefix, &oi) + om := infoToVersionMetadata(s.Prefix, &o) if err := callback(om); err != nil { return errors.Wrapf(err, "callback failed for %q", o.Key) diff --git a/repo/blob/sftp/sftp_storage_test.go b/repo/blob/sftp/sftp_storage_test.go index 5fa83c4624..46862d0b41 100644 --- a/repo/blob/sftp/sftp_storage_test.go +++ b/repo/blob/sftp/sftp_storage_test.go @@ -173,7 +173,6 @@ func TestSFTPStorageValid(t *testing.T) { host, port, knownHostsFile := startDockerSFTPServerOrSkip(t, idRSA) for _, embedCreds := range []bool{false, true} { - embedCreds := embedCreds t.Run(fmt.Sprintf("Embed=%v", embedCreds), func(t *testing.T) { ctx := testlogging.Context(t) diff --git a/repo/blob/sharded/sharded_test.go b/repo/blob/sharded/sharded_test.go index 233d3ed52d..4663f65159 100644 --- a/repo/blob/sharded/sharded_test.go +++ b/repo/blob/sharded/sharded_test.go @@ -156,8 +156,6 @@ func TestShardedFileStorageShardingMap(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(tc.desc, func(t *testing.T) { ctx := testlogging.Context(t) @@ -183,7 +181,7 @@ func TestShardedFileStorageShardingMap(t *testing.T) { } for _, blobID := range allBlobIDs { - for i := 0; i < len(blobID); i++ { + for i := range len(blobID) { prefix := blobID[0:i] var wantMatches []blob.ID diff --git a/repo/blob/storage.go b/repo/blob/storage.go index 3aa030ef58..f4a4b105a7 100644 --- a/repo/blob/storage.go +++ b/repo/blob/storage.go @@ -270,8 +270,6 @@ func IterateAllPrefixesInParallel(ctx context.Context, parallelism int, st Stora for _, prefix := range prefixes { wg.Add(1) - prefix := prefix - // acquire semaphore semaphore <- struct{}{} @@ -366,8 +364,6 @@ func DeleteMultiple(ctx context.Context, st Storage, ids []ID, 
parallelism int) // acquire semaphore sem <- struct{}{} - id := id - eg.Go(func() error { defer func() { <-sem // release semaphore diff --git a/repo/blob/throttling/throttler_test.go b/repo/blob/throttling/throttler_test.go index 4900697a2b..2e26b5e0bd 100644 --- a/repo/blob/throttling/throttler_test.go +++ b/repo/blob/throttling/throttler_test.go @@ -111,7 +111,7 @@ func testRateLimiting(t *testing.T, name string, wantRate float64, worker func(t var wg sync.WaitGroup - for i := 0; i < numWorkers; i++ { + for range numWorkers { wg.Add(1) go func() { diff --git a/repo/blob/throttling/throttling_semaphore_test.go b/repo/blob/throttling/throttling_semaphore_test.go index 5e4c4b098c..a11f7579ae 100644 --- a/repo/blob/throttling/throttling_semaphore_test.go +++ b/repo/blob/throttling/throttling_semaphore_test.go @@ -26,13 +26,13 @@ func TestThrottlingSemaphore(t *testing.T) { maxConcurrency int ) - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() - for j := 0; j < 10; j++ { + for range 10 { s.Acquire() mu.Lock() diff --git a/repo/blob/webdav/webdav_storage_test.go b/repo/blob/webdav/webdav_storage_test.go index e352d7ea51..4c4fd69095 100644 --- a/repo/blob/webdav/webdav_storage_test.go +++ b/repo/blob/webdav/webdav_storage_test.go @@ -84,7 +84,6 @@ func TestWebDAVStorageBuiltInServer(t *testing.T) { {1, 2}, {2, 2, 2}, } { - shardSpec := shardSpec t.Run(fmt.Sprintf("shards-%v", shardSpec), func(t *testing.T) { if err := os.RemoveAll(tmpDir); err != nil { t.Errorf("can't remove all: %q", tmpDir) diff --git a/repo/compression/compressor_test.go b/repo/compression/compressor_test.go index c465fdbdb9..60c4f4200f 100644 --- a/repo/compression/compressor_test.go +++ b/repo/compression/compressor_test.go @@ -14,8 +14,6 @@ func TestMain(m *testing.M) { testutil.MyTestMain(m) } func TestCompressor(t *testing.T) { for id, comp := range ByHeaderID { - id, comp := id, comp - t.Run(fmt.Sprintf("compressible-data-%x", id), func(t *testing.T) { // make sure all-zero data is compressed data := make([]byte, 10000) @@ -136,7 +134,7 @@ func compressionBenchmark(b *testing.B, comp Compressor, input []byte, output *b rdr := bytes.NewReader(input) - for i := 0; i < b.N; i++ { + for range b.N { output.Reset() rdr.Reset(input) @@ -153,7 +151,7 @@ func decompressionBenchmark(b *testing.B, comp Compressor, input []byte, output rdr := bytes.NewReader(input) - for i := 0; i < b.N; i++ { + for range b.N { output.Reset() rdr.Reset(input) diff --git a/repo/content/committed_content_index.go b/repo/content/committed_content_index.go index 1769c74205..067bf9d248 100644 --- a/repo/content/committed_content_index.go +++ b/repo/content/committed_content_index.go @@ -311,7 +311,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv eg, ctx := errgroup.WithContext(ctx) - for i := 0; i < parallelFetches; i++ { + for range parallelFetches { eg.Go(func() error { var data gather.WriteBuffer defer data.Close() diff --git a/repo/content/committed_read_manager.go b/repo/content/committed_read_manager.go index c45ffc9d53..619eec07e0 100644 --- a/repo/content/committed_read_manager.go +++ b/repo/content/committed_read_manager.go @@ -203,7 +203,7 @@ func (sm *SharedManager) attemptReadPackFileLocalIndex(ctx context.Context, pack func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error { nextSleepTime := 100 * time.Millisecond //nolint:gomnd - for i := 0; i < indexLoadAttempts; i++ { + for i := range indexLoadAttempts { ibm, err0 := sm.indexBlobManager(ctx) if 
err0 != nil { return err0 diff --git a/repo/content/content_formatter_test.go b/repo/content/content_formatter_test.go index fe01e8f9f6..fda3d8587c 100644 --- a/repo/content/content_formatter_test.go +++ b/repo/content/content_formatter_test.go @@ -28,10 +28,8 @@ func TestFormatters(t *testing.T) { h0 := sha1.Sum(data) for _, hashAlgo := range hashing.SupportedAlgorithms() { - hashAlgo := hashAlgo t.Run(hashAlgo, func(t *testing.T) { for _, encryptionAlgo := range encryption.SupportedAlgorithms(true) { - encryptionAlgo := encryptionAlgo t.Run(encryptionAlgo, func(t *testing.T) { ctx := testlogging.Context(t) diff --git a/repo/content/content_manager_iterate.go b/repo/content/content_manager_iterate.go index 6e6691e42a..ece03dfb8d 100644 --- a/repo/content/content_manager_iterate.go +++ b/repo/content/content_manager_iterate.go @@ -64,7 +64,7 @@ func maybeParallelExecutor(parallel int, originalCallback IterateCallback) (Iter // start N workers, each fetching from the shared channel and invoking the provided callback. // cleanup() must be called to for worker completion - for i := 0; i < parallel; i++ { + for range parallel { wg.Add(1) go func() { @@ -266,7 +266,7 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix } else { // iterate {p,q}[0-9,a-f] for _, prefix := range blobPrefixes { - for hexDigit := 0; hexDigit < 16; hexDigit++ { + for hexDigit := range 16 { prefixes = append(prefixes, blob.ID(fmt.Sprintf("%v%x", prefix, hexDigit))) } } diff --git a/repo/content/content_manager_test.go b/repo/content/content_manager_test.go index 338b80ac01..91cf721c28 100644 --- a/repo/content/content_manager_test.go +++ b/repo/content/content_manager_test.go @@ -141,7 +141,7 @@ func (s *contentManagerSuite) TestContentManagerSmallContentWrites(t *testing.T) defer bm.CloseShared(ctx) itemCount := maxPackCapacity / (10 + encryptionOverhead) - for i := 0; i < itemCount; i++ { + for i := range itemCount { writeContentAndVerify(ctx, t, bm, seededRandomData(i, 10)) } @@ -163,7 +163,7 @@ func (s *contentManagerSuite) TestContentManagerDedupesPendingContents(t *testin defer bm.CloseShared(ctx) - for i := 0; i < 100; i++ { + for range 100 { writeContentAndVerify(ctx, t, bm, seededRandomData(0, maxPackCapacity/2)) } @@ -264,7 +264,7 @@ func (s *contentManagerSuite) TestContentManagerInternalFlush(t *testing.T) { defer bm.CloseShared(ctx) itemsToOverflow := (maxPackCapacity)/(25+encryptionOverhead) + 2 - for i := 0; i < itemsToOverflow; i++ { + for range itemsToOverflow { b := make([]byte, 25) cryptorand.Read(b) writeContentAndVerify(ctx, t, bm, b) @@ -274,7 +274,7 @@ func (s *contentManagerSuite) TestContentManagerInternalFlush(t *testing.T) { verifyBlobCount(t, data, map[blob.ID]int{"s": 1, "p": 1}) // do it again - should be 2 blobs + some bytes pending. 
- for i := 0; i < itemsToOverflow; i++ { + for range itemsToOverflow { b := make([]byte, 25) cryptorand.Read(b) writeContentAndVerify(ctx, t, bm, b) @@ -310,7 +310,7 @@ func (s *contentManagerSuite) TestContentManagerWriteMultiple(t *testing.T) { repeatCount = 500 } - for i := 0; i < repeatCount; i++ { + for i := range repeatCount { b := seededRandomData(i, i%113) blkID, err := bm.WriteContent(ctx, gather.FromSlice(b), "", NoCompression) @@ -1036,9 +1036,7 @@ func (s *contentManagerSuite) TestParallelWrites(t *testing.T) { workerWritten := make([][]ID, numWorkers) // start numWorkers, each writing random block and recording it - for workerID := 0; workerID < numWorkers; workerID++ { - workerID := workerID - + for workerID := range numWorkers { workersWG.Add(1) go func() { @@ -1287,8 +1285,6 @@ func (s *contentManagerSuite) TestHandleWriteErrors(t *testing.T) { } for n, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("case-%v", n), func(t *testing.T) { ctx := testlogging.Context(t) data := blobtesting.DataMap{} @@ -1334,11 +1330,8 @@ func (s *contentManagerSuite) TestRewriteNonDeleted(t *testing.T) { // perform a sequence WriteContent() RewriteContent() GetContent() // where actionX can be (0=flush and reopen, 1=flush, 2=nothing) - for action1 := 0; action1 < stepBehaviors; action1++ { - for action2 := 0; action2 < stepBehaviors; action2++ { - action1 := action1 - action2 := action2 - + for action1 := range stepBehaviors { + for action2 := range stepBehaviors { t.Run(fmt.Sprintf("case-%v-%v", action1, action2), func(t *testing.T) { ctx := testlogging.Context(t) data := blobtesting.DataMap{} @@ -1384,7 +1377,7 @@ func (s *contentManagerSuite) TestDisableFlush(t *testing.T) { bm.DisableIndexFlush(ctx) bm.DisableIndexFlush(ctx) - for i := 0; i < 500; i++ { + for i := range 500 { writeContentAndVerify(ctx, t, bm, seededRandomData(i, 100)) } bm.Flush(ctx) // flush will not have effect @@ -1404,12 +1397,9 @@ func (s *contentManagerSuite) TestRewriteDeleted(t *testing.T) { // perform a sequence WriteContent() Delete() RewriteContent() GetContent() // where actionX can be (0=flush and reopen, 1=flush, 2=nothing) - for action1 := 0; action1 < stepBehaviors; action1++ { - for action2 := 0; action2 < stepBehaviors; action2++ { - for action3 := 0; action3 < stepBehaviors; action3++ { - action1 := action1 - action2 := action2 - action3 := action3 + for action1 := range stepBehaviors { + for action2 := range stepBehaviors { + for action3 := range stepBehaviors { t.Run(fmt.Sprintf("case-%v-%v-%v", action1, action2, action3), func(t *testing.T) { ctx := testlogging.Context(t) data := blobtesting.DataMap{} @@ -1470,7 +1460,6 @@ func (s *contentManagerSuite) TestDeleteAndRecreate(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { // write a content data := blobtesting.DataMap{} @@ -1635,7 +1624,6 @@ func (s *contentManagerSuite) TestIterateContents(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { var mu sync.Mutex got := map[ID]bool{} @@ -1865,7 +1853,6 @@ func (s *contentManagerSuite) TestContentReadAliasing(t *testing.T) { func (s *contentManagerSuite) TestVersionCompatibility(t *testing.T) { for writeVer := format.MinSupportedReadVersion; writeVer <= format.CurrentWriteVersion; writeVer++ { - writeVer := writeVer t.Run(fmt.Sprintf("version-%v", writeVer), func(t *testing.T) { s.verifyVersionCompat(t, writeVer) }) @@ -1984,10 +1971,10 @@ func (s *contentManagerSuite) verifyReadsOwnWrites(t *testing.T, st blob.Storage bm 
:= s.newTestContentManagerWithTweaks(t, st, tweaks) ids := make([]ID, 100) - for i := 0; i < len(ids); i++ { + for i := range len(ids) { ids[i] = writeContentAndVerify(ctx, t, bm, seededRandomData(i, maxPackCapacity/2)) - for j := 0; j < i; j++ { + for j := range i { // verify all contents written so far verifyContent(ctx, t, bm, ids[j], seededRandomData(j, maxPackCapacity/2)) } @@ -2005,7 +1992,7 @@ func (s *contentManagerSuite) verifyReadsOwnWrites(t *testing.T, st blob.Storage require.NoError(t, bm.CloseShared(ctx)) bm = s.newTestContentManagerWithTweaks(t, st, tweaks) - for i := 0; i < len(ids); i++ { + for i := range len(ids) { verifyContent(ctx, t, bm, ids[i], seededRandomData(i, maxPackCapacity/2)) } } @@ -2257,12 +2244,8 @@ func (s *contentManagerSuite) TestPrefetchContent(t *testing.T) { } for _, hint := range hints { - hint := hint - t.Run("hint:"+hint, func(t *testing.T) { for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { wipeCache(t, ccd.CacheStorage()) wipeCache(t, ccm.CacheStorage()) @@ -2302,10 +2285,10 @@ func (s *contentManagerSuite) TestContentPermissiveCacheLoading(t *testing.T) { bm := s.newTestContentManagerWithTweaks(t, st, tweaks) ids := make([]ID, 100) - for i := 0; i < len(ids); i++ { + for i := range ids { ids[i] = writeContentAndVerify(ctx, t, bm, seededRandomData(i, maxPackCapacity/2)) - for j := 0; j < i; j++ { + for j := range i { // verify all contents written so far verifyContent(ctx, t, bm, ids[j], seededRandomData(j, maxPackCapacity/2)) } @@ -2331,7 +2314,7 @@ func (s *contentManagerSuite) TestContentPermissiveCacheLoading(t *testing.T) { bm = s.newTestContentManagerWithTweaks(t, st, tweaks) - for i := 0; i < len(ids); i++ { + for i := range ids { verifyContent(ctx, t, bm, ids[i], seededRandomData(i, maxPackCapacity/2)) } } @@ -2353,10 +2336,10 @@ func (s *contentManagerSuite) TestContentIndexPermissiveReadsWithFault(t *testin bm := s.newTestContentManagerWithTweaks(t, st, tweaks) ids := make([]ID, 100) - for i := 0; i < len(ids); i++ { + for i := range len(ids) { ids[i] = writeContentAndVerify(ctx, t, bm, seededRandomData(i, maxPackCapacity/2)) - for j := 0; j < i; j++ { + for j := range i { // verify all contents written so far verifyContent(ctx, t, bm, ids[j], seededRandomData(j, maxPackCapacity/2)) } @@ -2384,7 +2367,7 @@ func (s *contentManagerSuite) TestContentIndexPermissiveReadsWithFault(t *testin bm = s.newTestContentManagerWithTweaks(t, st, tweaks) - for i := 0; i < len(ids); i++ { + for i := range len(ids) { verifyContent(ctx, t, bm, ids[i], seededRandomData(i, maxPackCapacity/2)) } } diff --git a/repo/content/content_prefetch.go b/repo/content/content_prefetch.go index 453cfd35ea..f591ead932 100644 --- a/repo/content/content_prefetch.go +++ b/repo/content/content_prefetch.go @@ -103,7 +103,7 @@ func (bm *WriteManager) PrefetchContents(ctx context.Context, contentIDs []ID, h } }() - for i := 0; i < parallelFetches; i++ { + for range parallelFetches { wg.Add(1) go func() { diff --git a/repo/content/index/index_builder.go b/repo/content/index/index_builder.go index ec541c7736..974d6e6ae9 100644 --- a/repo/content/index/index_builder.go +++ b/repo/content/index/index_builder.go @@ -50,11 +50,11 @@ func (b Builder) Add(i Info) { var base36Value [256]byte func init() { - for i := 0; i < 10; i++ { + for i := range 10 { base36Value['0'+i] = byte(i) } - for i := 0; i < 26; i++ { + for i := range 26 { base36Value['a'+i] = byte(i + 10) //nolint:gomnd base36Value['A'+i] = byte(i + 10) //nolint:gomnd } @@ -83,9 +83,7 @@ func (b 
Builder) sortedContents() []Info { var wg sync.WaitGroup numWorkers := runtime.NumCPU() - for worker := 0; worker < numWorkers; worker++ { - worker := worker - + for worker := range numWorkers { wg.Add(1) go func() { @@ -108,7 +106,7 @@ func (b Builder) sortedContents() []Info { // Phase 3 - merge results from all buckets. result := make([]Info, 0, len(b)) - for i := 0; i < len(buckets); i++ { + for i := range len(buckets) { result = append(result, buckets[i]...) } diff --git a/repo/content/index/index_v2.go b/repo/content/index/index_v2.go index c170bfcc97..9bba4f6f3b 100644 --- a/repo/content/index/index_v2.go +++ b/repo/content/index/index_v2.go @@ -788,7 +788,7 @@ func openV2PackIndex(data []byte, closer func() error) (Index, error) { func parseFormatsBuffer(formatsBuf []byte, cnt int) []indexV2FormatInfo { formats := make([]indexV2FormatInfo, cnt) - for i := 0; i < cnt; i++ { + for i := range cnt { f := formatsBuf[v2FormatInfoSize*i:] formats[i].compressionHeaderID = compression.HeaderID(binary.BigEndian.Uint32(f[v2FormatOffsetCompressionID:])) diff --git a/repo/content/index/packindex_test.go b/repo/content/index/packindex_test.go index 2e1f551846..93d379ae80 100644 --- a/repo/content/index/packindex_test.go +++ b/repo/content/index/packindex_test.go @@ -112,7 +112,7 @@ func TestPackIndex_V2(t *testing.T) { func testPackIndex(t *testing.T, version int) { var infos []Info // deleted contents with all information - for i := 0; i < 100; i++ { + for i := range 100 { infos = append(infos, Info{ TimestampSeconds: randomUnixTime(), Deleted: true, @@ -127,7 +127,7 @@ func testPackIndex(t *testing.T, version int) { }) } // non-deleted content - for i := 0; i < 100; i++ { + for i := range 100 { infos = append(infos, Info{ TimestampSeconds: randomUnixTime(), ContentID: deterministicContentID(t, "packed", i), @@ -229,7 +229,7 @@ func testPackIndex(t *testing.T, version int) { prefixes := []IDPrefix{"a", "b", "f", "0", "3", "aa", "aaa", "aab", "fff", "m", "x", "y", "m0", "ma"} - for i := 0; i < 100; i++ { + for i := range 100 { contentID := deterministicContentID(t, "no-such-content", i) v, err := ndx.GetInfo(contentID) @@ -244,7 +244,6 @@ func testPackIndex(t *testing.T, version int) { for _, prefix := range prefixes { cnt2 := 0 - prefix := prefix require.NoError(t, ndx.Iterate(PrefixRange(prefix), func(info2 InfoReader) error { cnt2++ if !strings.HasPrefix(info2.GetContentID().String(), string(prefix)) { @@ -300,7 +299,7 @@ func TestPackIndexPerContentLimits(t *testing.T) { func TestSortedContents(t *testing.T) { b := Builder{} - for i := 0; i < 100; i++ { + for i := range 100 { v := deterministicContentID(t, "", i) b.Add(Info{ @@ -370,7 +369,7 @@ func TestSortedContents2(t *testing.T) { func TestPackIndexV2TooManyUniqueFormats(t *testing.T) { b := Builder{} - for i := 0; i < v2MaxFormatCount; i++ { + for i := range v2MaxFormatCount { v := deterministicContentID(t, "", i) b.Add(Info{ @@ -416,18 +415,18 @@ func fuzzTestIndexOpen(originalData []byte) { } func fuzzTest(rnd *rand.Rand, originalData []byte, rounds int, callback func(d []byte)) { - for round := 0; round < rounds; round++ { + for range rounds { data := append([]byte(nil), originalData...) 
// mutate small number of bytes bytesToMutate := rnd.Intn(3) - for i := 0; i < bytesToMutate; i++ { + for range bytesToMutate { pos := rnd.Intn(len(data)) data[pos] = byte(rnd.Int()) } sectionsToInsert := rnd.Intn(3) - for i := 0; i < sectionsToInsert; i++ { + for range sectionsToInsert { pos := rnd.Intn(len(data)) insertedLength := rnd.Intn(20) insertedData := make([]byte, insertedLength) @@ -437,7 +436,7 @@ func fuzzTest(rnd *rand.Rand, originalData []byte, rounds int, callback func(d [ } sectionsToDelete := rnd.Intn(3) - for i := 0; i < sectionsToDelete; i++ { + for range sectionsToDelete { pos := rnd.Intn(len(data)) deletedLength := rnd.Intn(10) @@ -494,7 +493,7 @@ func verifyAllShardedIDs(t *testing.T, sharded []Builder, numTotal, numShards in require.Len(t, sharded, numShards) m := map[ID]bool{} - for i := 0; i < numTotal; i++ { + for i := range numTotal { m[deterministicContentID(t, "", i)] = true } diff --git a/repo/content/indexblob/index_blob_manager_v0_test.go b/repo/content/indexblob/index_blob_manager_v0_test.go index 979d1a6dd0..7c6a9d9e0d 100644 --- a/repo/content/indexblob/index_blob_manager_v0_test.go +++ b/repo/content/indexblob/index_blob_manager_v0_test.go @@ -66,8 +66,6 @@ func TestIndexBlobManager(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { // fake underlying blob store with fake time storageData := blobtesting.DataMap{} @@ -196,8 +194,7 @@ func TestIndexBlobManagerStress(t *testing.T) { numActors := 2 - for actorID := 0; actorID < numActors; actorID++ { - actorID := actorID + for actorID := range numActors { loggedSt := logging.NewWrapper(st, testlogging.Printf(func(m string, args ...interface{}) { t.Logf(fmt.Sprintf("@%v actor[%v]:", fakeTimeFunc().Format("150405.000"), actorID)+m, args...) }, ""), "") @@ -334,7 +331,7 @@ func TestIndexBlobManagerPreventsResurrectOfDeletedContents_RandomizedTimings(t } // the test is randomized and runs very quickly, run it lots of times - for i := 0; i < numAttempts; i++ { + for i := range numAttempts { t.Run(fmt.Sprintf("attempt-%v", i), func(t *testing.T) { verifyIndexBlobManagerPreventsResurrectOfDeletedContents( t, @@ -435,7 +432,7 @@ func verifyFakeContentsWritten(ctx context.Context, t *testing.T, m *ManagerV0, } // verify that all contents previously written can be read. 
- for i := 0; i < numWritten; i++ { + for i := range numWritten { id := fakeContentID(contentPrefix, i) if _, ok := all[id]; !ok { if deletedContents[id] { @@ -523,7 +520,7 @@ func deleteFakeContents(ctx context.Context, t *testing.T, m *ManagerV0, prefix ndx := map[string]fakeContentIndexEntry{} - for i := 0; i < count; i++ { + for range count { n := fakeContentID(prefix, rand.Intn(numWritten)) if deleted[n] { continue @@ -592,7 +589,7 @@ func writeFakeContents(ctx context.Context, t *testing.T, m *ManagerV0, prefix s ndx := map[string]fakeContentIndexEntry{} - for i := 0; i < count; i++ { + for range count { n := fakeContentID(prefix, *numWritten) ndx[n] = fakeContentIndexEntry{ ModTime: timeFunc(), diff --git a/repo/ecc/ecc_rs_crc.go b/repo/ecc/ecc_rs_crc.go index 0a3bd449df..919f1e6255 100644 --- a/repo/ecc/ecc_rs_crc.go +++ b/repo/ecc/ecc_rs_crc.go @@ -196,15 +196,15 @@ func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, _ []byte, output *gather inputPos := 0 - for b := 0; b < sizes.Blocks; b++ { + for range sizes.Blocks { eccPos := 0 - for i := 0; i < sizes.DataShards; i++ { + for i := range sizes.DataShards { shards[i] = inputBytes[inputPos : inputPos+sizes.ShardSize] inputPos += sizes.ShardSize } - for i := 0; i < sizes.ParityShards; i++ { + for i := range sizes.ParityShards { shards[sizes.DataShards+i] = eccBytes[eccPos : eccPos+sizes.ShardSize] eccPos += sizes.ShardSize } @@ -214,7 +214,7 @@ func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, _ []byte, output *gather return errors.Wrap(err, "Error computing ECC") } - for i := 0; i < sizes.ParityShards; i++ { + for i := range sizes.ParityShards { s := sizes.DataShards + i binary.BigEndian.PutUint32(crcBytes, crc32.ChecksumIEEE(shards[s])) @@ -278,9 +278,9 @@ func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, _ []byte, output *gather writeOriginalPos := 0 paddingStartPos := len(copied) - parityPlusCrcSizeInBlock*sizes.Blocks - for b := 0; b < sizes.Blocks; b++ { - for i := 0; i < sizes.DataShards; i++ { - initialDataPos := dataPos + for b := range sizes.Blocks { + for i := range sizes.DataShards { + initialDataPos := dataPos //nolint:copyloopvar crc := binary.BigEndian.Uint32(dataBytes[dataPos : dataPos+crcSize]) dataPos += crcSize @@ -297,7 +297,7 @@ func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, _ []byte, output *gather } } - for i := 0; i < sizes.ParityShards; i++ { + for i := range sizes.ParityShards { s := sizes.DataShards + i crc := binary.BigEndian.Uint32(eccBytes[eccPos : eccPos+crcSize]) @@ -351,7 +351,7 @@ func readLength(shards [][]byte, sizes *sizesInfo) (originalSize, startShard, st startShard = 4 startByte = 0 - for i := 0; i < 4; i++ { + for i := range 4 { lengthBuffer[i] = shards[i][0] } diff --git a/repo/ecc/ecc_rs_crc_test.go b/repo/ecc/ecc_rs_crc_test.go index 4cefcab542..5975006a13 100644 --- a/repo/ecc/ecc_rs_crc_test.go +++ b/repo/ecc/ecc_rs_crc_test.go @@ -117,7 +117,7 @@ func testRsCrc32ChangeInData(t *testing.T, opts *Options, originalSize, changedB sizes := impl.(*ReedSolomonCrcECC).computeSizesFromOriginal(originalSize) parity := sizes.ParityShards * (crcSize + sizes.ShardSize) * sizes.Blocks - for i := 0; i < changedBytes; i++ { + for i := range changedBytes { flipByte(data, parity+i*(crcSize+sizes.ShardSize)+crcSize) } }) @@ -131,7 +131,7 @@ func testRsCrc32ChangeInDataCrc(t *testing.T, opts *Options, originalSize, chang sizes := impl.(*ReedSolomonCrcECC).computeSizesFromOriginal(originalSize) parity := sizes.ParityShards * (crcSize + sizes.ShardSize) * sizes.Blocks - for i := 0; i 
< changedBytes; i++ { + for i := range changedBytes { flipByte(data, parity+i*(crcSize+sizes.ShardSize)) } }) @@ -144,7 +144,7 @@ func testRsCrc32ChangeInParity(t *testing.T, opts *Options, originalSize, change func(impl encryption.Encryptor, data []byte) { sizes := impl.(*ReedSolomonCrcECC).computeSizesFromOriginal(originalSize) - for i := 0; i < changedBytes; i++ { + for i := range changedBytes { flipByte(data, i*(crcSize+sizes.ShardSize)+crcSize) } }) @@ -157,7 +157,7 @@ func testRsCrc32ChangeInParityCrc(t *testing.T, opts *Options, originalSize, cha func(impl encryption.Encryptor, data []byte) { sizes := impl.(*ReedSolomonCrcECC).computeSizesFromOriginal(originalSize) - for i := 0; i < changedBytes; i++ { + for i := range changedBytes { flipByte(data, i*(crcSize+sizes.ShardSize)) } }) diff --git a/repo/ecc/ecc_utils_test.go b/repo/ecc/ecc_utils_test.go index f87803c76f..ac2127dbad 100644 --- a/repo/ecc/ecc_utils_test.go +++ b/repo/ecc/ecc_utils_test.go @@ -37,7 +37,7 @@ func testPutAndGet(t *testing.T, opts *Options, originalSize, require.NoError(t, err) original := make([]byte, originalSize) - for i := 0; i < originalSize; i++ { + for i := range originalSize { original[i] = byte(i%255) + 1 } diff --git a/repo/encryption/encryption_test.go b/repo/encryption/encryption_test.go index 80c9c701eb..06f023f736 100644 --- a/repo/encryption/encryption_test.go +++ b/repo/encryption/encryption_test.go @@ -36,7 +36,6 @@ func TestRoundTrip(t *testing.T) { rand.Read(contentID2) for _, encryptionAlgo := range encryption.SupportedAlgorithms(true) { - encryptionAlgo := encryptionAlgo t.Run(encryptionAlgo, func(t *testing.T) { e, err := encryption.CreateEncryptor(parameters{encryptionAlgo, masterKey}) if err != nil { @@ -189,7 +188,7 @@ func BenchmarkEncryption(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { var out gather.WriteBuffer enc.Encrypt(plainText, iv, &out) diff --git a/repo/format/format_blob_cache_test.go b/repo/format/format_blob_cache_test.go index 105b005efb..3435fe3ecb 100644 --- a/repo/format/format_blob_cache_test.go +++ b/repo/format/format_blob_cache_test.go @@ -30,8 +30,6 @@ func TestFormatBlobCache(t *testing.T) { t.Run("Cases", func(t *testing.T) { for _, tc := range cases { - tc := tc - t.Run(tc.desc, func(t *testing.T) { t.Parallel() diff --git a/repo/format/format_blob_test.go b/repo/format/format_blob_test.go index c5b5513c14..ab454ea97b 100644 --- a/repo/format/format_blob_test.go +++ b/repo/format/format_blob_test.go @@ -61,7 +61,6 @@ func TestFormatBlobRecovery(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(string(tc.blobID), func(t *testing.T) { v, err := RecoverFormatBlob(ctx, st, tc.blobID, -1) if tc.err == nil { diff --git a/repo/hashing/hashing_test.go b/repo/hashing/hashing_test.go index c8b9e370ab..911d6bee0d 100644 --- a/repo/hashing/hashing_test.go +++ b/repo/hashing/hashing_test.go @@ -28,8 +28,6 @@ func TestRoundTrip(t *testing.T) { rand.Read(hmacSecret) for _, hashingAlgo := range hashing.SupportedAlgorithms() { - hashingAlgo := hashingAlgo - t.Run(hashingAlgo, func(t *testing.T) { f, err := hashing.CreateHashFunc(parameters{hashingAlgo, hmacSecret}) if err != nil { diff --git a/repo/logging/logging_test.go b/repo/logging/logging_test.go index 0d436e7f00..40815008b6 100644 --- a/repo/logging/logging_test.go +++ b/repo/logging/logging_test.go @@ -105,7 +105,7 @@ func BenchmarkLogger(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { mod1(ctx) } } diff --git a/repo/maintenance/blob_gc.go 
b/repo/maintenance/blob_gc.go index f55c9fb9b4..f23ab05ec7 100644 --- a/repo/maintenance/blob_gc.go +++ b/repo/maintenance/blob_gc.go @@ -40,7 +40,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite if !opt.DryRun { // start goroutines to delete blobs as they come. - for i := 0; i < opt.Parallel; i++ { + for range opt.Parallel { eg.Go(func() error { for bm := range unused { if err := rep.BlobStorage().DeleteBlob(ctx, bm.BlobID); err != nil { diff --git a/repo/maintenance/blob_retain.go b/repo/maintenance/blob_retain.go index 3a8f16510a..1aba4a786e 100644 --- a/repo/maintenance/blob_retain.go +++ b/repo/maintenance/blob_retain.go @@ -59,7 +59,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite if !opt.DryRun { // start goroutines to extend blob retention as they come. - for i := 0; i < opt.Parallel; i++ { + for range opt.Parallel { wg.Add(1) go func() { diff --git a/repo/maintenance/content_rewrite.go b/repo/maintenance/content_rewrite.go index 1931de254f..21683eba8d 100644 --- a/repo/maintenance/content_rewrite.go +++ b/repo/maintenance/content_rewrite.go @@ -62,7 +62,7 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt * var wg sync.WaitGroup - for i := 0; i < opt.Parallel; i++ { + for range opt.Parallel { wg.Add(1) go func() { diff --git a/repo/maintenance/content_rewrite_test.go b/repo/maintenance/content_rewrite_test.go index 7b474ca737..2079279bb3 100644 --- a/repo/maintenance/content_rewrite_test.go +++ b/repo/maintenance/content_rewrite_test.go @@ -73,13 +73,11 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("case-%v", tc), func(t *testing.T) { ctx, env := repotesting.NewEnvironment(t, s.formatVersion) // run N sessions to create N individual pack blobs for each content prefix - for i := 0; i < tc.numPContents; i++ { + for range tc.numPContents { require.NoError(t, repo.WriteSession(ctx, env.Repository, repo.WriteSessionOptions{}, func(ctx context.Context, w repo.RepositoryWriter) error { ow := w.NewObjectWriter(ctx, object.WriterOptions{}) fmt.Fprintf(ow, "%v", uuid.NewString()) @@ -88,7 +86,7 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { })) } - for i := 0; i < tc.numQContents; i++ { + for range tc.numQContents { require.NoError(t, repo.WriteSession(ctx, env.Repository, repo.WriteSessionOptions{}, func(ctx context.Context, w repo.RepositoryWriter) error { ow := w.NewObjectWriter(ctx, object.WriterOptions{Prefix: "k"}) fmt.Fprintf(ow, "%v", uuid.NewString()) diff --git a/repo/maintenance/maintenance_run_test.go b/repo/maintenance/maintenance_run_test.go index 9e6166be89..280acab8b1 100644 --- a/repo/maintenance/maintenance_run_test.go +++ b/repo/maintenance/maintenance_run_test.go @@ -69,8 +69,6 @@ func TestShouldDeleteOrphanedBlobs(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { require.Equal(t, tc.want, shouldDeleteOrphanedPacks(now, &Schedule{ Runs: tc.runs, diff --git a/repo/manifest/manifest_manager_test.go b/repo/manifest/manifest_manager_test.go index f49f7884f1..9c567d8859 100644 --- a/repo/manifest/manifest_manager_test.go +++ b/repo/manifest/manifest_manager_test.go @@ -183,7 +183,7 @@ func TestManifestInitCorruptedBlock(t *testing.T) { for blobID, v := range data { for _, prefix := range content.PackBlobIDPrefixes { if strings.HasPrefix(string(blobID), string(prefix)) { - for i := 0; i < len(v); i++ { + for 
i := range len(v) { v[i] ^= 1 } } @@ -221,7 +221,6 @@ func TestManifestInitCorruptedBlock(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { err := tc.f() if err == nil || !strings.Contains(err.Error(), "invalid checksum") { @@ -378,7 +377,7 @@ func TestManifestAutoCompaction(t *testing.T) { mgr := newManagerForTesting(ctx, t, data, ManagerOptions{}) - for i := 0; i < 100; i++ { + for i := range 100 { item1 := map[string]int{"foo": 1, "bar": 2} labels1 := map[string]string{"type": "item", "color": "red"} found, err := mgr.Find(ctx, labels1) @@ -408,7 +407,7 @@ func TestManifestConfigureAutoCompaction(t *testing.T) { mgr := newManagerForTesting(ctx, t, data, ManagerOptions{AutoCompactionThreshold: compactionCount}) - for i := 0; i < compactionCount-1; i++ { + for range compactionCount - 1 { addAndVerify(ctx, t, mgr, labels1, item1) require.NoError(t, mgr.Flush(ctx)) require.NoError(t, mgr.b.Flush(ctx)) @@ -468,7 +467,7 @@ func TestManifestAutoCompactionWithReadOnly(t *testing.T) { mgr, err := NewManager(ctx, bm, ManagerOptions{}, nil) require.NoError(t, err, "getting initial manifest manager") - for i := 0; i < 100; i++ { + for range 100 { item1 := map[string]int{"foo": 1, "bar": 2} labels1 := map[string]string{"type": "item", "color": "red"} diff --git a/repo/object/object_manager_test.go b/repo/object/object_manager_test.go index 6785d8ffeb..5c1326e313 100644 --- a/repo/object/object_manager_test.go +++ b/repo/object/object_manager_test.go @@ -292,7 +292,7 @@ func TestObjectWriterRaceBetweenCheckpointAndResult(t *testing.T) { repeat = 5 } - for i := 0; i < repeat; i++ { + for range repeat { w := om.NewWriter(ctx, WriterOptions{ AsyncWrites: 1, }) @@ -662,8 +662,6 @@ func TestReaderStoredBlockNotFound(t *testing.T) { func TestEndToEndReadAndSeek(t *testing.T) { for _, asyncWrites := range []int{0, 4, 8} { - asyncWrites := asyncWrites - t.Run(fmt.Sprintf("async-%v", asyncWrites), func(t *testing.T) { t.Parallel() @@ -712,10 +710,7 @@ func TestEndToEndReadAndSeekWithCompression(t *testing.T) { } for _, compressible := range []bool{false, true} { - compressible := compressible - for compressorName := range compression.ByName { - compressorName := compressorName t.Run(string(compressorName), func(t *testing.T) { ctx := testlogging.Context(t) @@ -789,7 +784,7 @@ func verify(ctx context.Context, t *testing.T, cr contentReader, objectID ID, ex return } - for i := 0; i < 20; i++ { + for range 20 { sampleSize := int(rand.Int31n(300)) seekOffset := int(rand.Int31n(int32(len(expectedData)))) diff --git a/repo/object/objectid.go b/repo/object/objectid.go index d6c16e90f1..3f934d1db5 100644 --- a/repo/object/objectid.go +++ b/repo/object/objectid.go @@ -82,7 +82,7 @@ func (i ID) String() string { // Append appends string representation of ObjectID that is suitable for displaying in the UI. 
func (i ID) Append(out []byte) []byte { - for j := 0; j < int(i.indirection); j++ { + for range i.indirection { out = append(out, 'I') } diff --git a/repo/repo_benchmarks_test.go b/repo/repo_benchmarks_test.go index a915a0594d..13d0105d9d 100644 --- a/repo/repo_benchmarks_test.go +++ b/repo/repo_benchmarks_test.go @@ -23,7 +23,7 @@ func BenchmarkWriterDedup1M(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { // write exactly the same data writer := env.RepositoryWriter.NewObjectWriter(ctx, object.WriterOptions{}) writer.Write(dataBuf) @@ -43,7 +43,7 @@ func BenchmarkWriterNoDedup1M(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { // write exactly the same data writer := env.RepositoryWriter.NewObjectWriter(ctx, object.WriterOptions{}) diff --git a/repo/repository_test.go b/repo/repository_test.go index d1b658b8f3..6588f181ca 100644 --- a/repo/repository_test.go +++ b/repo/repository_test.go @@ -207,7 +207,7 @@ func verify(ctx context.Context, t *testing.T, rep repo.Repository, objectID obj return } - for i := 0; i < 20; i++ { + for range 20 { sampleSize := int(rand.Int31n(300)) seekOffset := int(rand.Int31n(int32(len(expectedData)))) diff --git a/repo/splitter/splitter_test.go b/repo/splitter/splitter_test.go index c9a09d2366..8e0754694b 100644 --- a/repo/splitter/splitter_test.go +++ b/repo/splitter/splitter_test.go @@ -71,16 +71,12 @@ func TestSplitterStability(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { t.Parallel() for name, getSplitPointsFunc := range getSplitPointsFunctions { - name := name - getSplitPointsFunc := getSplitPointsFunc - t.Run(name, func(t *testing.T) { - for repeat := 0; repeat < numRepeats; repeat++ { + for range numRepeats { s := tc.factory() if got, want := s.MaxSegmentSize(), tc.maxSplit; got != want { diff --git a/site/.go-version b/site/.go-version index dbfae7a029..d71259759d 100644 --- a/site/.go-version +++ b/site/.go-version @@ -1 +1 @@ -1.21.x +1.22.x diff --git a/snapshot/manager.go b/snapshot/manager.go index dd23c74038..41e5625187 100644 --- a/snapshot/manager.go +++ b/snapshot/manager.go @@ -167,9 +167,10 @@ func LoadSnapshots(ctx context.Context, rep repo.Repository, manifestIDs []manif }(i, n) } - for i := 0; i < cap(sem); i++ { + for range cap(sem) { sem <- true } + close(sem) successful := result[:0] diff --git a/snapshot/policy/policy_manager_test.go b/snapshot/policy/policy_manager_test.go index a631db20d9..5eaa0dc714 100644 --- a/snapshot/policy/policy_manager_test.go +++ b/snapshot/policy/policy_manager_test.go @@ -131,8 +131,6 @@ func TestPolicyManagerInheritanceTest(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(tc.sourceInfo.String(), func(t *testing.T) { pol, def, src, err := GetEffectivePolicy(ctx, env.RepositoryWriter, tc.sourceInfo) if err != nil { @@ -430,7 +428,6 @@ func TestApplicablePoliciesForSource(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.si.String(), func(t *testing.T) { res, err := applicablePoliciesForSource(ctx, env.RepositoryWriter, tc.si, nil) if err != nil { diff --git a/snapshot/policy/policy_merge_test.go b/snapshot/policy/policy_merge_test.go index dd4c337511..84d1276b9a 100644 --- a/snapshot/policy/policy_merge_test.go +++ b/snapshot/policy/policy_merge_test.go @@ -33,7 +33,7 @@ func ensureTypesMatch(t *testing.T, policyType, definitionType reflect.Type) { sourceInfoType := reflect.TypeOf(snapshot.SourceInfo{}) - for i := 0; i < policyType.NumField(); i++ { + 
for i := range policyType.NumField() { f := policyType.Field(i) dt, ok := definitionType.FieldByName(f.Name) @@ -60,7 +60,7 @@ func TestPolicyMerge(t *testing.T) { //nolint:thelper func testPolicyMerge(t *testing.T, policyType, definitionType reflect.Type, prefix string) { - for i := 0; i < policyType.NumField(); i++ { + for i := range policyType.NumField() { f := policyType.Field(i) dt, ok := definitionType.FieldByName(f.Name) diff --git a/snapshot/policy/retention_policy_test.go b/snapshot/policy/retention_policy_test.go index a1b23a7c47..6a168e04ce 100644 --- a/snapshot/policy/retention_policy_test.go +++ b/snapshot/policy/retention_policy_test.go @@ -161,8 +161,6 @@ func TestRetentionPolicyTest(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { var manifests []*snapshot.Manifest var manifests2 []*snapshot.Manifest diff --git a/snapshot/restore/restore.go b/snapshot/restore/restore.go index 196d267342..abf22d2886 100644 --- a/snapshot/restore/restore.go +++ b/snapshot/restore/restore.go @@ -275,8 +275,6 @@ func (c *copier) copyDirectoryContent(ctx context.Context, d fs.Directory, targe onItemCompletion := parallelwork.OnNthCompletion(len(entries), onCompletion) for _, e := range entries { - e := e - if e.IsDir() { c.stats.EnqueuedDirCount.Add(1) // enqueue directories first, so that we quickly determine the total number and size of items. diff --git a/snapshot/snapshotfs/snapshot_verifier.go b/snapshot/snapshotfs/snapshot_verifier.go index 4534fcc29e..d23b9e55ef 100644 --- a/snapshot/snapshotfs/snapshot_verifier.go +++ b/snapshot/snapshotfs/snapshot_verifier.go @@ -146,7 +146,7 @@ func (v *Verifier) InParallel(ctx context.Context, enqueue func(tw *TreeWalker) v.fileWorkQueue = make(chan verifyFileWorkItem, v.opts.FileQueueLength) - for i := 0; i < v.opts.Parallelism; i++ { + for range v.opts.Parallelism { v.workersWG.Add(1) go func() { diff --git a/snapshot/snapshotfs/upload.go b/snapshot/snapshotfs/upload.go index f496c5edfc..f55ec223cb 100644 --- a/snapshot/snapshotfs/upload.go +++ b/snapshot/snapshotfs/upload.go @@ -178,11 +178,10 @@ func (u *Uploader) uploadFileInternal(ctx context.Context, parentCheckpointRegis var wg workshare.AsyncGroup[*uploadWorkItem] defer wg.Close() - for i := 0; i < len(parts); i++ { - i := i + for i := range parts { offset := int64(i) * chunkSize - length := chunkSize + length := chunkSize //nolint:copyloopvar if i == len(parts)-1 { // last part has unknown length to accommodate the file that may be growing as we're snapshotting it length = -1 diff --git a/snapshot/snapshotfs/upload_test.go b/snapshot/snapshotfs/upload_test.go index 7ebea0c44d..99284d70b3 100644 --- a/snapshot/snapshotfs/upload_test.go +++ b/snapshot/snapshotfs/upload_test.go @@ -424,7 +424,6 @@ func TestUpload_ErrorEntries(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { u := NewUploader(th.repo) @@ -657,8 +656,6 @@ func TestUploadWithCheckpointing(t *testing.T) { } for _, d := range dirsToCheckpointAt { - d := d - d.OnReaddir(func() { t.Logf("onReadDir %v %s", d.Name(), debug.Stack()) // trigger checkpoint @@ -1102,14 +1099,14 @@ func TestParallelUploadDedup(t *testing.T) { // 10 identical non-compressible files, 50MB each var files []*os.File - for i := 0; i < 10; i++ { + for i := range 10 { f, cerr := os.Create(filepath.Join(td, fmt.Sprintf("file-%v", i))) require.NoError(t, cerr) files = append(files, f) } - for j := 0; j < 1000; j++ { + for range 1000 { buf := make([]byte, 50000) 
rand.Read(buf) @@ -1163,14 +1160,14 @@ func TestParallelUploadOfLargeFiles(t *testing.T) { // Write 2 x 50MB files var files []*os.File - for i := 0; i < 2; i++ { + for i := range 2 { f, cerr := os.Create(filepath.Join(td, fmt.Sprintf("file-%v", i))) require.NoError(t, cerr) files = append(files, f) } - for j := 0; j < 1000; j++ { + for range 1000 { buf := make([]byte, 50000) for _, f := range files { diff --git a/tests/end_to_end_test/all_formats_test.go b/tests/end_to_end_test/all_formats_test.go index 2e6c2a46d5..a2cc849c61 100644 --- a/tests/end_to_end_test/all_formats_test.go +++ b/tests/end_to_end_test/all_formats_test.go @@ -25,12 +25,8 @@ func TestAllFormatsSmokeTest(t *testing.T) { }, nil) for _, encryptionAlgo := range encryption.SupportedAlgorithms(false) { - encryptionAlgo := encryptionAlgo - t.Run(encryptionAlgo, func(t *testing.T) { for _, hashAlgo := range hashing.SupportedAlgorithms() { - - hashAlgo := hashAlgo t.Run(hashAlgo, func(t *testing.T) { t.Parallel() diff --git a/tests/end_to_end_test/api_server_repository_test.go b/tests/end_to_end_test/api_server_repository_test.go index 1e8b7a6e1c..66e6487706 100644 --- a/tests/end_to_end_test/api_server_repository_test.go +++ b/tests/end_to_end_test/api_server_repository_test.go @@ -361,7 +361,7 @@ func TestFindManifestsPaginationOverGRPC(t *testing.T) { // add about 36 MB worth of manifests require.NoError(t, repo.WriteSession(ctx, rep, repo.WriteSessionOptions{}, func(ctx context.Context, w repo.RepositoryWriter) error { - for i := 0; i < numManifests; i++ { + for range numManifests { uniqueID := strings.Repeat(uuid.NewString(), 100) require.Len(t, uniqueID, 3600) diff --git a/tests/end_to_end_test/auto_update_test.go b/tests/end_to_end_test/auto_update_test.go index 66badb873c..7b4c1375ee 100644 --- a/tests/end_to_end_test/auto_update_test.go +++ b/tests/end_to_end_test/auto_update_test.go @@ -36,8 +36,6 @@ func TestAutoUpdateEnableTest(t *testing.T) { os.Unsetenv("KOPIA_CHECK_FOR_UPDATES") for _, tc := range cases { - tc := tc - t.Run(tc.desc, func(t *testing.T) { t.Parallel() runner := testenv.NewInProcRunner(t) diff --git a/tests/end_to_end_test/restore_test.go b/tests/end_to_end_test/restore_test.go index 14b258e4d6..163ec540f2 100644 --- a/tests/end_to_end_test/restore_test.go +++ b/tests/end_to_end_test/restore_test.go @@ -297,7 +297,6 @@ func TestSnapshotRestore(t *testing.T) { t.Run("modes", func(t *testing.T) { for _, tc := range cases { - tc := tc t.Run(tc.fname, func(t *testing.T) { t.Parallel() fname := filepath.Join(restoreArchiveDir, tc.fname) @@ -688,8 +687,6 @@ func TestSnapshotSparseRestore(t *testing.T) { } for _, c := range cases { - c := c - t.Run(c.name, func(t *testing.T) { if c.name == "blk_hole_on_buf_boundary" && runtime.GOARCH == "arm64" { t.Skip("skipping on arm64 due to a failure - https://github.com/kopia/kopia/issues/3178") diff --git a/tests/end_to_end_test/shallowrestore_test.go b/tests/end_to_end_test/shallowrestore_test.go index 131c15f70b..ce3114ee70 100644 --- a/tests/end_to_end_test/shallowrestore_test.go +++ b/tests/end_to_end_test/shallowrestore_test.go @@ -946,7 +946,7 @@ func verifyShallowVsOriginalFile(t *testing.T, rdc *repoDirEntryCache, shallow, func makeLongName(c rune) string { // TODO(rjk): not likely to work on plan9. 
buffy := make([]byte, 0, restore.MaxFilenameLength) - for i := 0; i < restore.MaxFilenameLength; i++ { + for range restore.MaxFilenameLength { buffy = append(buffy, byte(c)) } diff --git a/tests/end_to_end_test/snapshot_actions_test.go b/tests/end_to_end_test/snapshot_actions_test.go index ec102f3d2c..cfc0d565ed 100644 --- a/tests/end_to_end_test/snapshot_actions_test.go +++ b/tests/end_to_end_test/snapshot_actions_test.go @@ -315,8 +315,6 @@ func TestSnapshotActionsEnable(t *testing.T) { } for _, tc := range cases { - tc := tc - t.Run(tc.desc, func(t *testing.T) { t.Parallel() diff --git a/tests/end_to_end_test/snapshot_create_test.go b/tests/end_to_end_test/snapshot_create_test.go index 2ec4a23768..48aeda6748 100644 --- a/tests/end_to_end_test/snapshot_create_test.go +++ b/tests/end_to_end_test/snapshot_create_test.go @@ -504,7 +504,6 @@ func TestSnapshotCreateWithIgnore(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.desc, func(t *testing.T) { runner := testenv.NewInProcRunner(t) e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) diff --git a/tests/end_to_end_test/snapshot_fail_test.go b/tests/end_to_end_test/snapshot_fail_test.go index a260d0982d..3e40ce0ccd 100644 --- a/tests/end_to_end_test/snapshot_fail_test.go +++ b/tests/end_to_end_test/snapshot_fail_test.go @@ -121,7 +121,7 @@ func testSnapshotFail( ) // Test the root dir permissions - for ti, tt := range []struct { + for tcIdx, tc := range []struct { desc string modifyEntry string snapSource string @@ -251,8 +251,6 @@ func testSnapshotFail( // Reference test conditions outside of range variables to satisfy linter tcIgnoreDirErr := ignoreDirErr tcIgnoreFileErr := ignoreFileErr - tcIdx := ti - tc := tt tname := fmt.Sprintf("%s_ignoreFileErr_%s_ignoreDirErr_%s_failFast_%v", tc.desc, ignoreDirErr, ignoreFileErr, isFailFast) t.Run(tname, func(t *testing.T) { diff --git a/tests/end_to_end_test/snapshot_migrate_test.go b/tests/end_to_end_test/snapshot_migrate_test.go index c3770d87c3..78cf4df8e2 100644 --- a/tests/end_to_end_test/snapshot_migrate_test.go +++ b/tests/end_to_end_test/snapshot_migrate_test.go @@ -37,7 +37,7 @@ func (s *formatSpecificTestSuite) TestSnapshotMigrate(t *testing.T) { compressibleDir := testutil.TempDirectory(t) - for i := 0; i < 10; i++ { + for range 10 { require.NoError(t, writeCompressibleFile(filepath.Join(compressibleDir, uuid.NewString()))) } @@ -114,10 +114,10 @@ func writeCompressibleFile(fname string) error { defer f.Close() // 1000 x 64000 - for i := 0; i < 1000; i++ { + for range 1000 { val := uuid.NewString() - for j := 0; j < 100; j++ { + for range 100 { if _, err := f.WriteString(val); err != nil { return err } diff --git a/tests/endurance_test/endurance_test.go b/tests/endurance_test/endurance_test.go index 566cfc18ea..735cf38b6a 100644 --- a/tests/endurance_test/endurance_test.go +++ b/tests/endurance_test/endurance_test.go @@ -92,9 +92,7 @@ func TestEndurance(t *testing.T) { rwMutex := &sync.RWMutex{} t.Run("Runners", func(t *testing.T) { - for i := 0; i < enduranceRunnerCount; i++ { - i := i - + for i := range enduranceRunnerCount { t.Run(fmt.Sprintf("Runner-%v", i), func(t *testing.T) { t.Parallel() defer func() { diff --git a/tests/repository_stress_test/repository_stress_test.go b/tests/repository_stress_test/repository_stress_test.go index 2b02c11362..5578ef1a56 100644 --- a/tests/repository_stress_test/repository_stress_test.go +++ b/tests/repository_stress_test/repository_stress_test.go @@ -226,7 +226,7 @@ func runStress(t *testing.T, opt *StressOptions) 
{ var configFiles []string // set up two parallel kopia connections, each with its own config file and cache. - for i := 0; i < opt.ConfigsPerRepository; i++ { + for i := range opt.ConfigsPerRepository { configFile := filepath.Join(tmpPath, fmt.Sprintf("kopia-%v.config", i)) configFiles = append(configFiles, configFile) @@ -256,11 +256,7 @@ func runStress(t *testing.T, opt *StressOptions) { defer logFile.Close() for _, configFile := range configFiles { - configFile := configFile - - for i := 0; i < opt.OpenRepositoriesPerConfig; i++ { - i := i - + for i := range opt.OpenRepositoriesPerConfig { eg.Go(func() error { log := testlogging.Printf(func(msg string, args ...interface{}) { fmt.Fprintf(logFile, clock.Now().Format("2006-01-02T15:04:05.000000Z07:00")+" "+msg+"\n", args...) @@ -302,7 +298,7 @@ func longLivedRepositoryTest(ctx context.Context, t *testing.T, configFile strin eg, ctx := errgroup.WithContext(ctx) - for i := 0; i < opt.SessionsPerOpenRepository; i++ { + for i := range opt.SessionsPerOpenRepository { ors := or.NewSession() _, w, err := rep.(repo.DirectRepository).NewDirectWriter(ctx, repo.WriteSessionOptions{ @@ -312,7 +308,7 @@ func longLivedRepositoryTest(ctx context.Context, t *testing.T, configFile strin return errors.Wrap(err, "error opening writer") } - for j := 0; j < opt.WorkersPerSession; j++ { + for j := range opt.WorkersPerSession { log2 := log.With("worker", fmt.Sprintf("s%vw%v::", i, j)) eg.Go(func() error { diff --git a/tests/robustness/engine/action.go b/tests/robustness/engine/action.go index 850925f990..64ed6ebd79 100644 --- a/tests/robustness/engine/action.go +++ b/tests/robustness/engine/action.go @@ -42,7 +42,7 @@ func (e *Engine) ExecAction(ctx context.Context, actionKey ActionKey, opts map[s var out map[string]string n := robustness.GetOptAsIntOrDefault(ActionRepeaterField, opts, defaultActionRepeats) - for i := 0; i < n; i++ { + for range n { out, err = action.f(ctx, e, opts, logEntry) if err != nil { break diff --git a/tests/robustness/engine/engine_test.go b/tests/robustness/engine/engine_test.go index ee8b8cb5f7..e3d490fe71 100644 --- a/tests/robustness/engine/engine_test.go +++ b/tests/robustness/engine/engine_test.go @@ -152,7 +152,7 @@ func makeTempS3Bucket(t *testing.T) (bucketName string, cleanupCB func()) { var err error - for retry := 0; retry < retries; retry++ { + for range retries { time.Sleep(retryPeriod) err = cli.RemoveBucket(ctx, bucketName) @@ -485,7 +485,7 @@ func TestPickActionWeighted(t *testing.T) { numTestLoops := 100000 results := make(map[ActionKey]int, len(tc.inputCtrlWeights)) - for loop := 0; loop < numTestLoops; loop++ { + for range numTestLoops { results[pickActionWeighted(inputCtrlOpts, tc.inputActionList)]++ } @@ -539,7 +539,7 @@ func TestActionsFilesystem(t *testing.T) { } numActions := 10 - for loop := 0; loop < numActions; loop++ { + for range numActions { err := eng.RandomAction(ctx, actionOpts) if !(err == nil || errors.Is(err, robustness.ErrNoOp)) { t.Error("Hit error", err) @@ -586,7 +586,7 @@ func TestActionsS3(t *testing.T) { } numActions := 10 - for loop := 0; loop < numActions; loop++ { + for range numActions { err := eng.RandomAction(ctx, actionOpts) if !(err == nil || errors.Is(err, robustness.ErrNoOp)) { t.Error("Hit error", err) diff --git a/tests/robustness/multiclient_test/framework/harness.go b/tests/robustness/multiclient_test/framework/harness.go index 36037f0352..49ff1f1cec 100644 --- a/tests/robustness/multiclient_test/framework/harness.go +++ b/tests/robustness/multiclient_test/framework/harness.go 
@@ -210,7 +210,6 @@ func (th *TestHarness) Run( //nolint:thelper testNum := 0 for _, ctx := range ctxs { - ctx := ctx testNum++ t.Run(strconv.Itoa(testNum), func(t *testing.T) { diff --git a/tests/robustness/pathlock/path_lock_test.go b/tests/robustness/pathlock/path_lock_test.go index 49d4f45f1c..5db08f8746 100644 --- a/tests/robustness/pathlock/path_lock_test.go +++ b/tests/robustness/pathlock/path_lock_test.go @@ -284,7 +284,7 @@ func TestPathLockRace(t *testing.T) { wg := new(sync.WaitGroup) numGoroutines := 100 - for i := 0; i < numGoroutines; i++ { + for range numGoroutines { wg.Add(1) go func() { @@ -293,7 +293,7 @@ func TestPathLockRace(t *testing.T) { // Pick from three different path values that should all be // covered by the same lock. path := "/some/path/a/b/c" - for i := 0; i < rand.Intn(3); i++ { + for range rand.Intn(3) { path = filepath.Dir(path) } diff --git a/tests/robustness/snapmeta/kopia_persister_light_test.go b/tests/robustness/snapmeta/kopia_persister_light_test.go index b1eb3c095a..280ebffb00 100644 --- a/tests/robustness/snapmeta/kopia_persister_light_test.go +++ b/tests/robustness/snapmeta/kopia_persister_light_test.go @@ -47,9 +47,8 @@ func TestConcurrency(t *testing.T) { vals := [][]byte{[]byte("val1"), []byte("val2"), []byte("val3")} t.Run("storeLoad", func(t *testing.T) { - for i := 0; i < 9; i++ { - j := i - t.Run(strconv.Itoa(i), func(t *testing.T) { + for j := range 9 { + t.Run(strconv.Itoa(j), func(t *testing.T) { t.Parallel() kpl.testStoreLoad(ctx, t, keys[j%3], vals[j%3]) }) @@ -57,9 +56,8 @@ func TestConcurrency(t *testing.T) { }) t.Run("delete", func(t *testing.T) { - for i := 0; i < 9; i++ { - j := i - t.Run(strconv.Itoa(i), func(t *testing.T) { + for j := range 9 { + t.Run(strconv.Itoa(j), func(t *testing.T) { t.Parallel() kpl.testDelete(ctx, t, keys[j%3]) }) diff --git a/tests/stress_test/stress_test.go b/tests/stress_test/stress_test.go index 0bfd1b3c94..eaf8f0f4e9 100644 --- a/tests/stress_test/stress_test.go +++ b/tests/stress_test/stress_test.go @@ -70,8 +70,7 @@ func stressTestWithStorage(t *testing.T, st blob.Storage, duration time.Duration deadline := clock.Now().Add(duration) t.Run("workers", func(t *testing.T) { - for i := 0; i < goroutineCount; i++ { - i := i + for i := range goroutineCount { t.Run(fmt.Sprintf("worker-%v", i), func(t *testing.T) { t.Parallel() stressWorker(ctx, t, deadline, openMgr, int64(seed0+i)) diff --git a/tests/testdirtree/testdirtree.go b/tests/testdirtree/testdirtree.go index a463c944c5..6d0ba8f237 100644 --- a/tests/testdirtree/testdirtree.go +++ b/tests/testdirtree/testdirtree.go @@ -206,7 +206,7 @@ func createDirectoryTreeInternal(dirname string, options DirectoryTreeOptions, c childOptions.Depth-- numSubDirs := rand.Intn(options.MaxSubdirsPerDirectory) + 1 - for i := 0; i < numSubDirs; i++ { + for range numSubDirs { subdirName := randomName(options) if err := createDirectoryTreeInternal(filepath.Join(dirname, subdirName), childOptions, counters); err != nil { @@ -219,7 +219,7 @@ func createDirectoryTreeInternal(dirname string, options DirectoryTreeOptions, c if options.MaxFilesPerDirectory > 0 { numFiles := rand.Intn(options.MaxFilesPerDirectory) + 1 - for i := 0; i < numFiles; i++ { + for range numFiles { fileName := randomName(options) if err := createRandomFile(filepath.Join(dirname, fileName), options, counters); err != nil { @@ -232,7 +232,7 @@ func createDirectoryTreeInternal(dirname string, options DirectoryTreeOptions, c if options.MaxSymlinksPerDirectory > 0 { numSymlinks := 
rand.Intn(options.MaxSymlinksPerDirectory) + 1 - for i := 0; i < numSymlinks; i++ { + for range numSymlinks { fileName := randomName(options) if err := createRandomSymlink(filepath.Join(dirname, fileName), fileNames, options, counters); err != nil { diff --git a/tests/tools/kopiarunner/kopia_snapshotter_exe_test.go b/tests/tools/kopiarunner/kopia_snapshotter_exe_test.go index dcdcb63e82..1b37e0a23f 100644 --- a/tests/tools/kopiarunner/kopia_snapshotter_exe_test.go +++ b/tests/tools/kopiarunner/kopia_snapshotter_exe_test.go @@ -42,7 +42,7 @@ func TestParseSnapListAllExeTest(t *testing.T) { fmt.Println(snapIDIsLastInList("asdf", snapIDListSnap)) const numSnapsToTest = 5 - for snapCount := 0; snapCount < numSnapsToTest; snapCount++ { + for snapCount := range numSnapsToTest { snapID, err := ks.CreateSnapshot(sourceDir) require.NoError(t, err) diff --git a/tools/gettool/autodownload/autodownload.go b/tools/gettool/autodownload/autodownload.go index 3caffb16f4..9d022f4cfa 100644 --- a/tools/gettool/autodownload/autodownload.go +++ b/tools/gettool/autodownload/autodownload.go @@ -171,7 +171,7 @@ func Download(url, dir string, checksum map[string]string, stripPathComponents i nextSleepTime := initialSleepTime - for i := 0; i < maxRetries; i++ { + for i := range maxRetries { err := downloadInternal(url, dir, checksum, stripPathComponents) if err == nil { // success
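Note: the loop rewrites throughout this diff rely on two Go 1.22 language changes: range-over-int ("for i := range n" counts i from 0 to n-1, and "for range n" simply runs the body n times) and per-iteration loop variables, which make the old "tc := tc" capture copies redundant. A minimal standalone sketch of both, illustrative only and not part of the patch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Range over an int: replaces the classic "for i := 0; i < 3; i++".
	for i := range 3 {
		fmt.Println("iteration", i)
	}

	// Since Go 1.22 each iteration gets a fresh loop variable, so
	// goroutines can capture it directly without the pre-1.22
	// "tc := tc" shadowing workaround removed throughout this diff.
	var wg sync.WaitGroup
	for _, tc := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("case", tc) // safe: tc is per-iteration
		}()
	}
	wg.Wait()
}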