@@ -9,6 +9,7 @@
 	"fmt"
 	"sort"
 	"strings"
+	"sync/atomic"
 	"testing"
 	"time"

@@ -195,3 +196,77 @@ func TestCleanupManagerCloseWithPacing(t *testing.T) {
 		t.Fatalf("timed out waiting for cleanupManager.Close() to return")
 	}
 }
+
+// TestCleanupManagerFallingBehind verifies that we disable pacing when the jobs
+// channel reaches the high threshold.
+func TestCleanupManagerFallingBehind(t *testing.T) {
+	mem := vfs.NewMem()
+	var rate atomic.Int32
+	rate.Store(10 * MB) // 10MB/s
+	opts := &Options{
+		FS:                      mem,
+		FreeSpaceThresholdBytes: 1,
+		TargetByteDeletionRate:  func() int { return int(rate.Load()) }, // 10 MB/s
+	}
+	opts.EnsureDefaults()
+
+	objProvider, err := objstorageprovider.Open(objstorageprovider.Settings{
+		FS:        mem,
+		FSDirName: "/",
+	})
+	require.NoError(t, err)
+	defer objProvider.Close()
+
+	getDeletePacerInfo := func() deletionPacerInfo {
+		return deletionPacerInfo{
+			freeBytes: 10 * GB,
+			liveBytes: 10 * GB,
+		}
+	}
+
+	cm := openCleanupManager(opts, objProvider, getDeletePacerInfo)
+
+	x := 0
+	addJob := func(fileSize int) {
+		x++
+		cm.EnqueueJob(1, []obsoleteFile{{
+			fileType: base.FileTypeTable,
+			fs:       mem,
+			path:     fmt.Sprintf("test%02d.sst", x),
+			fileNum:  base.DiskFileNum(x),
+			fileSize: uint64(fileSize),
+			isLocal:  true,
+		}}, obsoleteObjectStats{})
+	}
+
+	for range jobsQueueLowThreshold {
+		addJob(1 * MB)
+	}
+	// At 1MB per job, each deletion takes 100ms to pace. Note that the rate
+	// increase based on history won't make much difference, since the enqueued
+	// size is averaged over 5 minutes.
+	time.Sleep(50 * time.Millisecond)
+	require.Greater(t, len(cm.jobsCh), jobsQueueLowThreshold/2)
+	t.Logf("jobs channel length: %d", len(cm.jobsCh))
+
+	// Add enough jobs to exceed the high threshold. We add small jobs so that the
+	// historic rate doesn't grow significantly.
+	require.Greater(t, jobsQueueDepth, jobsQueueHighThreshold+jobsQueueLowThreshold)
+	for range jobsQueueHighThreshold {
+		addJob(1)
+	}
+
+	for i := 0; ; i++ {
+		time.Sleep(10 * time.Millisecond)
+		if len(cm.jobsCh) <= jobsQueueHighThreshold {
+			break
+		}
+		if i == 1000 {
+			t.Fatalf("jobs channel length never dropped below high threshold (%d vs %d)", len(cm.jobsCh), jobsQueueHighThreshold)
+		}
+	}
+	// Set a high rate so the rest of the jobs finish quickly.
+	rate.Store(1 * GB)
+	cm.Close()
+}
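
For context on what the test asserts: the behavior can be pictured as a hysteresis on the jobs-queue length, where pacing is switched off once the queue reaches the high threshold and, presumably, re-enabled once the queue drains back to the low threshold (the re-enable point is an assumption, not shown in this diff). The sketch below is a minimal, standalone illustration of that idea; the `queuePacer` type and the threshold constants are invented for the example and are not Pebble's actual implementation.

```go
package main

import "fmt"

// Stand-ins for jobsQueueLowThreshold / jobsQueueHighThreshold; the values are
// arbitrary and chosen only for this example.
const (
	lowThreshold  = 2
	highThreshold = 8
)

// queuePacer is a toy model of the hysteresis under test: pacing turns off when
// the queue reaches the high threshold and turns back on only once the queue
// has drained to the low threshold.
type queuePacer struct {
	paced bool
}

func (p *queuePacer) update(queueLen int) {
	switch {
	case queueLen >= highThreshold:
		p.paced = false // falling behind: delete at full speed
	case queueLen <= lowThreshold:
		p.paced = true // caught up: resume rate-limited deletion
	}
}

func main() {
	p := queuePacer{paced: true}
	for _, queueLen := range []int{1, 5, 9, 6, 3, 1} {
		p.update(queueLen)
		fmt.Printf("queue=%d paced=%v\n", queueLen, p.paced)
	}
}
```

The test drives the real cleanup manager through the same shape: fill the queue to the low threshold with 1MB jobs (paced at roughly 100ms each at 10MB/s), push past the high threshold with tiny jobs, then wait for the queue to drain to the high threshold once pacing is off.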