forked from cockroachdb/cockroach
/
limiter.go
56 lines (47 loc) · 1.62 KB
/
limiter.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
// Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/ccl/LICENSE
package storageccl
import (
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
)
const (
// parallelRequestsLimit is the number of ExportKeys or Ingest requests
// that can run at once. Both of these requests generally do some
// read/write from the network and cache results to a tmp file. In order
// to not exhaust the disk or memory, or saturate the network, limit the
// number of these that can be run in parallel. This number was chosen by
// a guess. If SST files are likely to not be over 200MB, then 5 parallel
// workers hopefully won't use more than 1GB of space in the tmp
// directory. It could be improved by more measured heuristics.
parallelRequestsLimit = 5
)
var (
// parallelRequestsLimiter is a counting semaphore implemented as a
// buffered channel: a send acquires one of parallelRequestsLimit slots
// (see beginLimitedRequest) and a receive releases it (see
// endLimitedRequest).
parallelRequestsLimiter = make(chan struct{}, parallelRequestsLimit)
)
// beginLimitedRequest acquires one of the parallelRequestsLimit slots,
// blocking until a slot frees up or ctx is canceled. It returns nil once
// a slot is held (the caller must release it via endLimitedRequest) and
// ctx.Err() if the context is done before a slot becomes available.
func beginLimitedRequest(ctx context.Context) error {
// Fast path: try to grab a slot without blocking, so the common
// uncontended case avoids the cost of starting a tracing span.
select {
case parallelRequestsLimiter <- struct{}{}:
return nil
default:
}
// Slow path: all slots are taken. Record the wait in a tracing span and
// block until either a slot opens up or the caller gives up.
ctx, span := tracing.ChildSpan(ctx, "beginLimitedRequest")
defer tracing.FinishSpan(span)
select {
case parallelRequestsLimiter <- struct{}{}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// endLimitedRequest releases a slot previously acquired by a successful
// beginLimitedRequest call. Calling it without a matching acquire will
// block (or steal another request's slot), so the pair must be balanced.
func endLimitedRequest() {
<-parallelRequestsLimiter
}