resource_manager/client: introduce RU and Request metrics #6170

Merged: 9 commits, Mar 20, 2023
Changes from 3 commits
80 changes: 70 additions & 10 deletions client/resource_group/controller/controller.go
@@ -26,15 +26,17 @@ import (
"github.com/pingcap/failpoint"
rmpb "github.com/pingcap/kvproto/pkg/resource_manager"
"github.com/pingcap/log"
"github.com/prometheus/client_golang/prometheus"
pd "github.com/tikv/pd/client"
"github.com/tikv/pd/client/errs"
"go.uber.org/zap"
)

const (
requestUnitConfigPath = "resource_group/ru_config"
maxRetry = 3
maxNotificationChanLen = 200
requestUnitConfigPath = "resource_group/ru_config"
Contributor: This needs to be consistent with #6063.

Member Author: This will be resolved automatically after merging #6063.

maxRetry = 3
maxNotificationChanLen = 200
maxConsumptionMetricsChanLen = 200
)

type selectType int
@@ -104,6 +106,8 @@ type ResourceGroupsController struct {
// currentRequests is used to record the request and resource group.
// Currently, we don't do multiple `AcquireTokenBuckets` at the same time, so there are no concurrency problems with `currentRequests`.
currentRequests []*rmpb.TokenBucketRequest

consumptionDispatcher chan *rmpb.TokenBucketRequest
}

// NewResourceGroupController returns a new ResourceGroupsController which impls ResourceGroupKVInterceptor
@@ -129,6 +133,7 @@ func NewResourceGroupController(
lowTokenNotifyChan: make(chan struct{}, 1),
tokenResponseChan: make(chan []*rmpb.TokenBucketResponse, 1),
tokenBucketUpdateChan: make(chan *groupCostController, maxNotificationChanLen),
consumptionDispatcher: make(chan *rmpb.TokenBucketRequest, maxConsumptionMetricsChanLen),
}
for _, opt := range opts {
opt(controller)
@@ -181,6 +186,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
for {
select {
case <-c.loopCtx.Done():
c.resetMetrics()
Member: Do we need to reset resourceGroupStatusGauge for each resource group when it's deleted from the client?

Member Author: Yes, I do it in cleanUpResourceGroup.

return
case resp := <-c.tokenResponseChan:
if resp != nil {
Expand Down Expand Up @@ -210,6 +216,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
}
}
}()
go c.backgroundMetricsFlush(ctx)
}

// Stop stops ResourceGroupController service.
@@ -221,6 +228,38 @@ func (c *ResourceGroupsController) Stop() error {
return nil
}

// Receive the consumption and flush it to the metrics.
func (c *ResourceGroupsController) backgroundMetricsFlush(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case req := <-c.consumptionDispatcher:
consumption := req.GetConsumptionSinceLastRequest()
if consumption == nil {
continue
}
var (
name = req.GetResourceGroupName()
rruMetrics = readRequestUnitCost.WithLabelValues(name)
wruMetrics = writeRequestUnitCost.WithLabelValues(name)
)
resourceGroupTokenRequestCounter.WithLabelValues(name).Inc()
// RU info.
if consumption.RRU != 0 {
rruMetrics.Observe(consumption.RRU)
Contributor: Is this duplicated with the server side?

Member Author: @JmPotato @BornChanger Do you think it is necessary to count the RU consumption of TiDB instances?

Contributor: We may not need it for now.

Member: I didn't really think about the need to view RU consumption at the instance level, so not keeping a record is okay with me.

}
if consumption.WRU != 0 {
wruMetrics.Observe(consumption.WRU)
}
}
}
}

func (c *ResourceGroupsController) resetMetrics() {
resourceGroupStatusGauge.Reset()
}
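For reference, the Prometheus collectors used in this file (readRequestUnitCost, writeRequestUnitCost, resourceGroupTokenRequestCounter, resourceGroupStatusGauge, successfulRequestDuration, failedRequestCounter, and the token-request collectors) are defined in a metrics file that is not part of the hunks shown here. A minimal sketch of what such definitions could look like follows; the namespace, help strings, and bucket choices are assumptions, not necessarily what this PR actually uses:

package controller

import "github.com/prometheus/client_golang/prometheus"

// Hypothetical definitions, for illustration only. The real metrics file in
// this PR may use different namespaces, subsystems, help text, and buckets.
var (
	readRequestUnitCost = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "resource_manager_client", // assumed namespace
			Name:      "read_request_unit",
			Help:      "Read request unit cost observed per resource group.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 12), // assumed buckets
		}, []string{"name"})
	writeRequestUnitCost = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "resource_manager_client",
			Name:      "write_request_unit",
			Help:      "Write request unit cost observed per resource group.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 12),
		}, []string{"name"})
	resourceGroupStatusGauge = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "resource_manager_client",
			Name:      "resource_group_status",
			Help:      "Whether the resource group is active on this client.",
		}, []string{"name"})
	successfulRequestDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "resource_manager_client",
			Name:      "successful_request_duration_seconds",
			Help:      "Wait duration of requests that were eventually admitted.",
			Buckets:   prometheus.DefBuckets,
		}, []string{"name"})
	failedRequestCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "resource_manager_client",
			Name:      "failed_request_total",
			Help:      "Number of failed requests per resource group.",
		}, []string{"name"})
	resourceGroupTokenRequestCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "resource_manager_client",
			Name:      "token_request_total",
			Help:      "Number of token bucket requests issued per resource group.",
		}, []string{"name"})
	// successfulTokenRequestDuration and failedTokenRequestCounter, used in
	// sendTokenBucketRequests, would be plain (unlabeled) collectors.
)

func init() {
	prometheus.MustRegister(readRequestUnitCost, writeRequestUnitCost,
		resourceGroupStatusGauge, successfulRequestDuration,
		failedRequestCounter, resourceGroupTokenRequestCounter)
}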

// tryGetResourceGroup will try to get the resource group controller from local cache first,
// if the local cache misses, it will then call gRPC to fetch the resource group info from server.
func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name string) (*groupCostController, error) {
@@ -239,7 +278,7 @@ func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name
return gc, nil
}
// Initialize the resource group controller.
gc, err := newGroupCostController(group, c.config, c.lowTokenNotifyChan, c.tokenBucketUpdateChan)
gc, err := newGroupCostController(group, c.config, c.lowTokenNotifyChan, c.tokenBucketUpdateChan, successfulRequestDuration.WithLabelValues(group.Name), failedRequestCounter.WithLabelValues(group.Name))
if err != nil {
return nil, err
}
@@ -248,6 +287,7 @@ func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name
// Check again to prevent initializing the same resource group concurrently.
tmp, loaded := c.groupsController.LoadOrStore(group.GetName(), gc)
if !loaded {
resourceGroupStatusGauge.WithLabelValues(name).Set(1)
log.Info("[resource group controller] create resource group cost controller", zap.String("name", group.GetName()))
}
return tmp.(*groupCostController), nil
@@ -266,6 +306,7 @@ func (c *ResourceGroupsController) cleanUpResourceGroup(ctx context.Context) err
resourceGroupName := key.(string)
if _, ok := latestGroups[resourceGroupName]; !ok {
c.groupsController.Delete(key)
resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName)
return true
}

@@ -277,6 +318,7 @@ func (c *ResourceGroupsController) cleanUpResourceGroup(ctx context.Context) err
if equalRU(latestConsumption, *gc.run.consumption) {
if gc.tombstone {
c.groupsController.Delete(resourceGroupName)
resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName)
return true
}
gc.tombstone = true
@@ -325,6 +367,7 @@ func (c *ResourceGroupsController) collectTokenBucketRequests(ctx context.Contex
if request != nil {
c.currentRequests = append(c.currentRequests, request)
}
c.consumptionDispatcher <- request
return true
})
if len(c.currentRequests) > 0 {
@@ -341,14 +384,18 @@ func (c *ResourceGroupsController) sendTokenBucketRequests(ctx context.Context,
go func() {
log.Debug("[resource group controller] send token bucket request", zap.Time("now", now), zap.Any("req", req.Requests), zap.String("source", source))
resp, err := c.provider.AcquireTokenBuckets(ctx, req)
latency := time.Since(now)
if err != nil {
// Don't log any errors caused by the stopper canceling the context.
if !errors.ErrorEqual(err, context.Canceled) {
log.L().Sugar().Infof("[resource group controller] token bucket rpc error: %v", err)
}
resp = nil
failedTokenRequestCounter.Inc()
} else {
successfulTokenRequestDuration.Observe(latency.Seconds())
}
log.Debug("[resource group controller] token bucket response", zap.Time("now", time.Now()), zap.Any("resp", resp), zap.String("source", source), zap.Duration("latency", time.Since(now)))
log.Debug("[resource group controller] token bucket response", zap.Time("now", time.Now()), zap.Any("resp", resp), zap.String("source", source), zap.Duration("latency", latency))
c.tokenResponseChan <- resp
}()
}
@@ -359,6 +406,7 @@ func (c *ResourceGroupsController) OnRequestWait(
) (*rmpb.Consumption, error) {
gc, err := c.tryGetResourceGroup(ctx, resourceGroupName)
if err != nil {
failedRequestCounter.WithLabelValues(resourceGroupName).Inc()
return nil, err
}
return gc.onRequestWait(ctx, info)
@@ -384,6 +432,9 @@ type groupCostController struct {

handleRespFunc func(*rmpb.TokenBucketResponse)

successfulRequestDuration prometheus.Observer
failedRequestCounter prometheus.Counter

mu struct {
sync.Mutex
consumption *rmpb.Consumption
@@ -456,6 +507,8 @@ func newGroupCostController(
mainCfg *Config,
lowRUNotifyChan chan struct{},
tokenBucketUpdateChan chan *groupCostController,
successfulRequestDuration prometheus.Observer,
failedRequestCounter prometheus.Counter,
) (*groupCostController, error) {
switch group.Mode {
case rmpb.GroupMode_RUMode:
@@ -467,8 +520,10 @@
}

gc := &groupCostController{
ResourceGroup: group,
mainCfg: mainCfg,
ResourceGroup: group,
mainCfg: mainCfg,
successfulRequestDuration: successfulRequestDuration,
failedRequestCounter: failedRequestCounter,
calculators: []ResourceCalculator{
newKVCalculator(mainCfg),
newSQLCalculator(mainCfg),
@@ -862,8 +917,10 @@ func (gc *groupCostController) onRequestWait(
if !gc.burstable.Load() {
var err error
now := time.Now()
var i int
var d time.Duration
retryLoop:
for i := 0; i < maxRetry; i++ {
for i = 0; i < maxRetry; i++ {
switch gc.mode {
case rmpb.GroupMode_RawMode:
res := make([]*Reservation, 0, len(requestResourceLimitTypeList))
@@ -872,7 +929,7 @@ func (gc *groupCostController) onRequestWait(
res = append(res, counter.limiter.Reserve(ctx, gc.mainCfg.maxWaitDuration, now, v))
}
}
if err = WaitReservations(ctx, now, res); err == nil {
if d, err = WaitReservations(ctx, now, res); err == nil {
break retryLoop
}
case rmpb.GroupMode_RUMode:
@@ -882,14 +939,17 @@ func (gc *groupCostController) onRequestWait(
res = append(res, counter.limiter.Reserve(ctx, gc.mainCfg.maxWaitDuration, now, v))
}
}
if err = WaitReservations(ctx, now, res); err == nil {
if d, err = WaitReservations(ctx, now, res); err == nil {
break retryLoop
}
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
gc.failedRequestCounter.Inc()
return nil, err
} else {
gc.successfulRequestDuration.Observe(d.Seconds())
}
}
gc.mu.Lock()
2 changes: 1 addition & 1 deletion client/resource_group/controller/controller_test.go
@@ -42,7 +42,7 @@ func createTestGroupCostController(re *require.Assertions) *groupCostController
}
ch1 := make(chan struct{})
ch2 := make(chan *groupCostController)
gc, err := newGroupCostController(group, DefaultConfig(), ch1, ch2)
gc, err := newGroupCostController(group, DefaultConfig(), ch1, ch2, successfulRequestDuration.WithLabelValues(group.Name), failedRequestCounter.WithLabelValues(group.Name))
re.NoError(err)
return gc
}
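Not part of this change, but the status-gauge lifecycle introduced in controller.go (Set(1) when a group cost controller is created, DeleteLabelValues when the group is cleaned up, Reset when the controller stops) could also be exercised directly with client_golang's testutil helpers. A hypothetical sketch; the test name and the "test" label value are illustrative:

package controller

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

// Hypothetical test mirroring the gauge lifecycle performed by
// tryGetResourceGroup, cleanUpResourceGroup, and resetMetrics.
func TestResourceGroupStatusGaugeLifecycle(t *testing.T) {
	re := require.New(t)
	resourceGroupStatusGauge.Reset()

	// tryGetResourceGroup sets the gauge to 1 when a group controller is created.
	resourceGroupStatusGauge.WithLabelValues("test").Set(1)
	re.Equal(1.0, testutil.ToFloat64(resourceGroupStatusGauge.WithLabelValues("test")))
	re.Equal(1, testutil.CollectAndCount(resourceGroupStatusGauge))

	// cleanUpResourceGroup deletes the label values when the group is removed.
	resourceGroupStatusGauge.DeleteLabelValues("test")
	re.Equal(0, testutil.CollectAndCount(resourceGroupStatusGauge))
}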
12 changes: 6 additions & 6 deletions client/resource_group/controller/limiter.go
@@ -396,9 +396,9 @@ func (limit Limit) tokensFromDuration(d time.Duration) float64 {

// WaitReservations is used to process a series of reservations
// so that all limiter tokens are returned if one reservation fails
func WaitReservations(ctx context.Context, now time.Time, reservations []*Reservation) error {
func WaitReservations(ctx context.Context, now time.Time, reservations []*Reservation) (time.Duration, error) {
if len(reservations) == 0 {
return nil
return 0, nil
}
cancel := func() {
for _, res := range reservations {
@@ -409,27 +409,27 @@ func WaitReservations(ctx context.Context, now time.Time, reservations []*Reserv
for _, res := range reservations {
if !res.ok {
cancel()
return errs.ErrClientResourceGroupThrottled
return 0, errs.ErrClientResourceGroupThrottled
}
delay := res.DelayFrom(now)
if delay > longestDelayDuration {
longestDelayDuration = delay
}
}
if longestDelayDuration <= 0 {
return nil
return 0, nil
}
t := time.NewTimer(longestDelayDuration)
defer t.Stop()

select {
case <-t.C:
// We can proceed.
return nil
return longestDelayDuration, nil
case <-ctx.Done():
// Context was canceled before we could proceed. Cancel the
// reservation, which may permit other events to proceed sooner.
cancel()
return ctx.Err()
return 0, ctx.Err()
}
}
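For context, the duration returned by the new WaitReservations signature is what allows the caller to record how long it actually waited before being admitted, as onRequestWait now does. A hypothetical caller sketch; waitAndObserve itself is illustrative and not part of this PR:

package controller

import (
	"context"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// waitAndObserve is a hypothetical helper showing the intended use of the new
// return value: the longest delay actually waited is fed into a latency
// observer such as successfulRequestDuration.WithLabelValues(name).
func waitAndObserve(ctx context.Context, res []*Reservation, obs prometheus.Observer) error {
	now := time.Now()
	d, err := WaitReservations(ctx, now, res)
	if err != nil {
		// WaitReservations has already canceled the reservations on failure.
		return err
	}
	obs.Observe(d.Seconds())
	return nil
}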
5 changes: 3 additions & 2 deletions client/resource_group/controller/limiter_test.go
@@ -152,7 +152,8 @@ func TestCancel(t *testing.T) {
r2 := lim2.Reserve(ctx1, InfDuration, t1, 5)
checkTokens(re, lim1, t2, 7)
checkTokens(re, lim2, t2, 2)
err := WaitReservations(ctx, t2, []*Reservation{r1, r2})
d, err := WaitReservations(ctx, t2, []*Reservation{r1, r2})
re.Equal(d, time.Duration(0))
re.Error(err)
checkTokens(re, lim1, t3, 13)
checkTokens(re, lim2, t3, 3)
@@ -166,7 +167,7 @@ func TestCancel(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
err := WaitReservations(ctx2, t3, []*Reservation{r1, r2})
_, err := WaitReservations(ctx2, t3, []*Reservation{r1, r2})
re.Error(err)
wg.Done()
}()