Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

planner: add monitor for prepared plan cache memory usage #38507

Merged
merged 34 commits into from
Nov 2, 2022
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
6d65432
plancache monitor
fzzf678 Oct 17, 2022
022ad83
Update plan_cache_lru.go
fzzf678 Oct 17, 2022
ea66c50
undo this
fzzf678 Oct 18, 2022
0a87a94
Update tidb.json
fzzf678 Oct 18, 2022
0462a6b
grafana json
fzzf678 Oct 19, 2022
7eb23a6
use defer
fzzf678 Oct 19, 2022
db78523
undo
fzzf678 Oct 19, 2022
130cbab
undo!
fzzf678 Oct 20, 2022
4eb8acc
Merge remote-tracking branch 'upstream/master' into monitor_for_planC…
fzzf678 Oct 21, 2022
b2ffddb
Merge remote-tracking branch 'upstream/master' into monitor_for_planC…
fzzf678 Oct 21, 2022
055c0c6
Update plan_cache_lru_test.go
fzzf678 Oct 21, 2022
19e4fa2
Update plan_cache_lru.go
fzzf678 Oct 21, 2022
42fc907
Update plan_cache_lru.go
fzzf678 Oct 21, 2022
29531d1
undo
fzzf678 Oct 21, 2022
f1daf34
Update physical_plans.go
fzzf678 Oct 24, 2022
b1ea388
Update physical_plans.go
fzzf678 Oct 24, 2022
fb9349c
Update plan_cache_lru.go
fzzf678 Oct 24, 2022
2070e10
undo
fzzf678 Oct 24, 2022
edf9f9d
Update plan_cache_lru.go
fzzf678 Oct 24, 2022
0492e5b
Update tidb.json
fzzf678 Oct 24, 2022
1f4317f
rename
fzzf678 Oct 24, 2022
3a668dc
close plan cache in closeConn
fzzf678 Oct 24, 2022
1aa6c79
undo
fzzf678 Oct 24, 2022
d0bd08c
planNum
fzzf678 Oct 26, 2022
ddfb501
Update plan_cache_utils.go
fzzf678 Oct 27, 2022
fea446a
Update plan_cache_lru.go
fzzf678 Oct 31, 2022
6a2cef6
rename
fzzf678 Oct 31, 2022
929bf86
Update plan_cache_lru.go
fzzf678 Nov 2, 2022
a3276d3
Merge remote-tracking branch 'upstream/master' into monitor_for_planC…
fzzf678 Nov 2, 2022
ccf0f38
Update plan_cache_utils.go
fzzf678 Nov 2, 2022
20745b4
Update tracker.go
fzzf678 Nov 2, 2022
7e90253
Update plan_cache_lru.go
fzzf678 Nov 2, 2022
c661708
Merge branch 'master' into monitor_for_planCacheMemUsage
qw4990 Nov 2, 2022
079de11
Merge branch 'master' into monitor_for_planCacheMemUsage
ti-chi-bot Nov 2, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
101 changes: 101 additions & 0 deletions metrics/grafana/tidb.json
Original file line number Diff line number Diff line change
Expand Up @@ -7473,6 +7473,107 @@
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_TEST-CLUSTER}",
"description": "Total memory usage of all prepared plan cache in a instance",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 29
},
"hiddenSeries": false,
"id": 269,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"hideEmpty": true,
"max": true,
"min": false,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null as zero",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.11",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "tidb_server_plan_cache_instance_memory_usage{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "{{instance}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Plan Cache Memory Usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:122",
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"$$hashKey": "object:123",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"repeat": null,
Expand Down
1 change: 1 addition & 0 deletions metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,7 @@ func RegisterMetrics() {
prometheus.MustRegister(PanicCounter)
prometheus.MustRegister(PlanCacheCounter)
prometheus.MustRegister(PlanCacheMissCounter)
prometheus.MustRegister(PlanCacheInstanceMemoryUsage)
prometheus.MustRegister(PseudoEstimation)
prometheus.MustRegister(PacketIOCounter)
prometheus.MustRegister(QueryDurationHistogram)
Expand Down
8 changes: 8 additions & 0 deletions metrics/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,14 @@ var (
Help: "Counter of plan cache miss.",
}, []string{LblType})

PlanCacheInstanceMemoryUsage = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "tidb",
Subsystem: "server",
Name: "plan_cache_instance_memory_usage",
Help: "Total plan cache memory usage of all sessions in a instance",
}, []string{LblType})

ReadFromTableCacheCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "tidb",
Expand Down
5 changes: 1 addition & 4 deletions planner/core/physical_plans.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,10 +196,7 @@ func (p *PhysicalTableReader) MemoryUsage() (sum int64) {
if p.tablePlan != nil {
sum += p.tablePlan.MemoryUsage()
}

for _, plan := range p.TablePlans {
sum += plan.MemoryUsage()
}
	// TablePlans is just the flattened form of tablePlan, so it is not counted again here
for _, pInfos := range p.PartitionInfos {
sum += pInfos.tableScan.MemoryUsage() + pInfos.partitionInfo.MemoryUsage()
}
Expand Down
33 changes: 24 additions & 9 deletions planner/core/plan_cache_lru.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"sync"

"github.com/pingcap/errors"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/kvcache"
Expand Down Expand Up @@ -71,7 +72,8 @@ func NewLRUPlanCache(capacity uint, guard float64, quota uint64,
capacity = 100
logutil.BgLogger().Info("capacity of LRU cache is less than 1, will use default value(100) init cache")
}

m := memory.NewTracker(memory.LabelForPreparedPlanCache, -1)
m.AttachTo(InstancePlanCacheMemoryTracker)
return &LRUPlanCache{
capacity: capacity,
size: 0,
Expand All @@ -80,7 +82,7 @@ func NewLRUPlanCache(capacity uint, guard float64, quota uint64,
pickFromBucket: pickFromBucket,
quota: quota,
guard: guard,
memTracker: newTrackerForLRUPC(),
memTracker: m,
}
}

Expand Down Expand Up @@ -111,6 +113,7 @@ func (l *LRUPlanCache) Get(key kvcache.Key, paramTypes []*types.FieldType) (valu
func (l *LRUPlanCache) Put(key kvcache.Key, value kvcache.Value, paramTypes []*types.FieldType) {
l.lock.Lock()
defer l.lock.Unlock()
defer l.updateMonitorMetric()

hash := strHashKey(key, true)
bucket, bucketExist := l.buckets[hash]
Expand Down Expand Up @@ -144,6 +147,7 @@ func (l *LRUPlanCache) Put(key kvcache.Key, value kvcache.Value, paramTypes []*t
func (l *LRUPlanCache) Delete(key kvcache.Key) {
l.lock.Lock()
defer l.lock.Unlock()
defer l.updateMonitorMetric()

hash := strHashKey(key, false)
bucket, bucketExist := l.buckets[hash]
Expand All @@ -161,13 +165,14 @@ func (l *LRUPlanCache) Delete(key kvcache.Key) {
func (l *LRUPlanCache) DeleteAll() {
l.lock.Lock()
defer l.lock.Unlock()
defer l.updateMonitorMetric()

for lru := l.lruList.Back(); lru != nil; lru = l.lruList.Back() {
l.lruList.Remove(lru)
l.size--
}
l.buckets = make(map[string]map[*list.Element]struct{}, 1)
l.memTracker = newTrackerForLRUPC()
l.memTracker.ReplaceBytesUsed(0)
}

// Size gets the current cache size.
Expand All @@ -182,6 +187,7 @@ func (l *LRUPlanCache) Size() int {
func (l *LRUPlanCache) SetCapacity(capacity uint) error {
l.lock.Lock()
defer l.lock.Unlock()
defer l.updateMonitorMetric()

if capacity < 1 {
return errors.New("capacity of LRU cache should be at least 1")
Expand All @@ -201,6 +207,18 @@ func (l *LRUPlanCache) MemoryUsage() (sum int64) {
return l.memTracker.BytesConsumed()
}

// Close releases the resources held by the LRUPlanCache when the owning
// session is closed: it zeroes the tracked memory, refreshes the instance
// level monitoring metric, and detaches the tracker from its parent.
func (l *LRUPlanCache) Close() {
	if l == nil || l.memTracker == nil {
		return
	}
	l.memTracker.ReplaceBytesUsed(0)
	l.updateMonitorMetric()
	l.memTracker.Detach()
}

// removeOldest removes the oldest element from the cache.
func (l *LRUPlanCache) removeOldest() {
lru := l.lruList.Back()
Expand Down Expand Up @@ -251,10 +269,7 @@ func PickPlanFromBucket(bucket map[*list.Element]struct{}, paramTypes []*types.F
return nil, false
}

// newTrackerForLRUPC return a tracker which consumed emptyLRUPlanCacheSize
// todo: pass label when track general plan cache memory
func newTrackerForLRUPC() *memory.Tracker {
m := memory.NewTracker(memory.LabelForPreparedPlanCache, -1)
//todo: maybe need attach here
return m
// updateMonitorMetric refreshes the instance-level plan cache memory usage
// gauge (shown in Grafana) with the bytes currently consumed by the
// instance-wide tracker. Callers must invoke it after any mutation that can
// change the cache's memory footprint.
func (l *LRUPlanCache) updateMonitorMetric() {
	metrics.PlanCacheInstanceMemoryUsage.WithLabelValues("instance").Set(float64(InstancePlanCacheMemoryTracker.BytesConsumed()))
}
8 changes: 8 additions & 0 deletions planner/core/plan_cache_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import (
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/hint"
"github.com/pingcap/tidb/util/kvcache"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/size"
atomic2 "go.uber.org/atomic"
"golang.org/x/exp/slices"
Expand All @@ -46,8 +47,15 @@ var (

// ExtractSelectAndNormalizeDigest extract the select statement and normalize it.
ExtractSelectAndNormalizeDigest func(stmtNode ast.StmtNode, specifiledDB string) (ast.StmtNode, string, string, error)

	// InstancePlanCacheMemoryTracker is the ancestor of all prepared plan caches' memory trackers
InstancePlanCacheMemoryTracker *memory.Tracker
)

// init creates the instance-level memory tracker that every session's
// prepared plan cache tracker attaches to, so total usage can be monitored.
func init() {
	InstancePlanCacheMemoryTracker = memory.NewTracker(memory.LabelForInstancePlanCacheMemory, -1)
}

type paramMarkerExtractor struct {
markers []ast.ParamMarkerExpr
}
Expand Down
3 changes: 3 additions & 0 deletions session/session.go
Original file line number Diff line number Diff line change
Expand Up @@ -2543,6 +2543,9 @@ func (s *session) Close() {
s.stmtStats.SetFinished()
}
s.ClearDiskFullOpt()
if s.preparedPlanCache != nil {
s.preparedPlanCache.Close()
}
}

// GetSessionVars implements the context.Context interface.
Expand Down
1 change: 1 addition & 0 deletions sessionctx/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ type PlanCache interface {
DeleteAll()
Size() int
SetCapacity(capacity uint) error
Close()
}

// Context is an interface for transaction and executive args environment.
Expand Down
2 changes: 2 additions & 0 deletions util/memory/tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -787,6 +787,8 @@ const (
LabelForGlobalAnalyzeMemory int = -25
// LabelForPreparedPlanCache represents the label of the prepared plan cache memory usage
LabelForPreparedPlanCache int = -26
	// LabelForInstancePlanCacheMemory represents the label of the total plan cache memory usage of all sessions
LabelForInstancePlanCacheMemory int = -27
)

// MetricsTypes is used to get label for metrics
Expand Down