Rename and add perf metrics to clockcache.
Signed-off-by: Harkishen-Singh <harkishensingh@hotmail.com>

This commit renames the cache metrics as per the new design and adds
performance metrics to clockcache. The following metrics are
implemented or updated:

promscale_cache_enabled{type="metric|trace", name="cache-name"}
promscale_cache_capacity_bytes{type="metric|trace", name="cache-name"}
promscale_cache_capacity_elements{type="metric|trace", name="cache-name"}
promscale_cache_elements{type="metric|trace", name="cache-name"}
promscale_cache_evictions_total{type="metric|trace", name="cache-name"}
promscale_cache_query_hits_total{type="metric|trace", name="cache-name"}
promscale_cache_queries_total{type="metric|trace", name="cache-name"}
promscale_cache_query_latency_microseconds_bucket{type="metric|trace", name="cache-name"}
promscale_cache_query_latency_microseconds_sum{type="metric|trace", name="cache-name"}
promscale_cache_query_latency_microseconds_count{type="metric|trace", name="cache-name"}

However, since statement_cache is not a clockcache, it exposes
promscale_cache_enabled, promscale_cache_statement_per_connection_capacity
and promscale_cache_elements_histogram instead.
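
For illustration, a minimal sketch (not part of this commit) of how a
metric-instrumented cache is obtained after this change; the cache name,
module and capacity below are made-up examples:

package main

import "github.com/timescale/promscale/pkg/clockcache"

func main() {
	// Hypothetical labels cache in the "metric" module holding up to 10k elements.
	// WithMetrics registers the basic series (promscale_cache_enabled, _elements,
	// _capacity_bytes, _capacity_elements, _evictions_total) and attaches the perf
	// series (promscale_cache_query_hits_total, _queries_total,
	// _query_latency_microseconds) with ConstLabels {type="metric", name="label"}.
	labels := clockcache.WithMetrics("label", "metric", 10000)

	// Get increments promscale_cache_queries_total and records its latency under
	// the method="Get" label; a hit additionally increments query_hits_total.
	if _, found := labels.Get("job"); !found {
		// Cache miss: queries_total grows, query_hits_total does not.
	}
}
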
Harkishen-Singh committed Feb 16, 2022
1 parent ead44af commit 75daab3
Showing 9 changed files with 186 additions and 114 deletions.
pkg/api/metrics.go (3 changes: 1 addition & 2 deletions)
@@ -94,8 +94,7 @@ func createMetrics() *Metrics {
IngestedSamples: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: util.PromNamespace,
Subsystem: "ingest",
Name: "ingested_total",
Name: "ingested_samples_total",
Help: "Total number of processed sample/metadata sent to remote storage.",
},
),
pkg/clockcache/cache.go (15 changes: 15 additions & 0 deletions)
@@ -9,11 +9,13 @@ import (
"reflect"
"sync"
"sync/atomic"
"time"
)

// CLOCK based approximate LRU storing designed for concurrent usage.
// Gets only require a read lock, while Inserts take at least one write lock.
type Cache struct {
metrics *perfMetrics
// guards elements and all fields except for `used` in Element, must have at
// least a read-lock to access, and a write-lock to insert/update/delete.
elementsLock sync.RWMutex
@@ -48,11 +50,16 @@ type element struct {

func WithMax(max uint64) *Cache {
return &Cache{
metrics: &perfMetrics{}, // Unregistered metrics.
elements: make(map[interface{}]*element, max),
storage: make([]element, 0, max),
}
}

func (self *Cache) applyPerfMetric(m *perfMetrics) {
self.metrics = m
}

// Insert a key/value mapping into the cache if the key is not already present,
// The sizeBytes represents the in-memory size of the key and value (used to estimate cache size).
// returns the canonical version of the value
@@ -194,6 +201,9 @@ func (self *Cache) evict() (insertPtr *element) {
// keys will be the keys whose values are present, while the remainder
// will be the keys not present in the cache
func (self *Cache) GetValues(keys []interface{}, valuesOut []interface{}) (numFound int) {
start := time.Now()
defer func() { self.metrics.Observe("Get_Values", time.Since(start)) }()

if len(keys) != len(valuesOut) {
panic(fmt.Sprintf("keys and values are not the same len. %d keys, %d values", len(keys), len(valuesOut)))
}
@@ -222,12 +232,16 @@ func (self *Cache) GetValues(keys []interface{}, valuesOut []interface{}) (numFo
}

func (self *Cache) Get(key interface{}) (interface{}, bool) {
start := time.Now()
defer func() { self.metrics.Observe("Get", time.Since(start)) }()

self.elementsLock.RLock()
defer self.elementsLock.RUnlock()
return self.get(key)
}

func (self *Cache) get(key interface{}) (interface{}, bool) {
self.metrics.Inc(self.metrics.queriesTotal)
elem, present := self.elements[key]
if !present {
return 0, false
@@ -241,6 +255,7 @@ func (self *Cache) get(key interface{}) (interface{}, bool) {
if atomic.LoadUint32(&elem.used) == 0 {
atomic.StoreUint32(&elem.used, 1)
}
self.metrics.Inc(self.metrics.hitsTotal)

return elem.value, true
}
pkg/clockcache/metrics.go (148 changes: 148 additions & 0 deletions)
@@ -0,0 +1,148 @@
package clockcache

import (
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/timescale/promscale/pkg/util"
)

type MetricOptions struct {
Name, Module string
}

// WithMetrics attaches the default cache metrics like _enabled, _capacity, _size, _elements, _evictions_total and
// the perf metrics like _query_hits, _queries, _query_latency by function.
// The module must be either 'metric' or 'trace'.
func WithMetrics(cacheName, module string, max uint64) *Cache {
cache := WithMax(max)
RegisterBasicMetrics(cacheName, module, cache)

perf := new(perfMetrics)
perf.createAndRegister(cacheName, module)
cache.applyPerfMetric(perf)
return cache
}

type perfMetrics struct {
isApplied bool
hitsTotal prometheus.Counter
queriesTotal prometheus.Counter
queriesLatency *prometheus.HistogramVec
}

func (pm *perfMetrics) createAndRegister(name, module string) {
pm.isApplied = true
pm.hitsTotal = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "promscale",
Subsystem: "cache",
Name: "query_hits_total",
Help: "Total query hits in clockcache.",
ConstLabels: map[string]string{"type": module, "name": name},
},
)
pm.queriesTotal = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "promscale",
Subsystem: "cache",
Name: "queries_total",
Help: "Total query requests to the clockcache.",
ConstLabels: map[string]string{"type": module, "name": name},
},
)
pm.queriesLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "promscale",
Subsystem: "cache",
Name: "query_latency_microseconds",
Help: "Query latency for the clockcache.",
ConstLabels: map[string]string{"type": module, "name": name},
Buckets: prometheus.LinearBuckets(1, 500, 20),
}, []string{"method"},
)
prometheus.MustRegister(pm.hitsTotal, pm.queriesTotal, pm.queriesLatency)
}

func (pm *perfMetrics) Inc(c prometheus.Counter) {
if pm.isApplied {
c.Inc()
}
}

func (pm *perfMetrics) Observe(method string, d time.Duration) {
if pm.isApplied {
pm.queriesLatency.WithLabelValues(method).Observe(float64(d.Microseconds()))
}
}

// RegisterBasicMetrics registers and creates basic metrics for cache like:
// 1. promscale_cache_enabled
// 2. promscale_cache_elements
// 3. promscale_cache_size
// 4. promscale_cache_capacity
// 5. promscale_cache_evictions_total
// Note: the moduleType refers to which module the cache belongs. Valid options: ["metric", "trace"].
func RegisterBasicMetrics(cacheName, moduleType string, c *Cache) {
if !(moduleType == "metric" || moduleType == "trace") {
panic("moduleType can only be either 'metric' or 'trace'")
}
enabled := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "enabled",
Help: "Cache is enabled or not.",
ConstLabels: map[string]string{ // type => ["trace" or "metric"] and name => name of the cache i.e., metric cache, series cache, schema cache, etc.
"type": "trace",
"name": cacheName,
},
},
)
enabled.Set(1)
count := prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "elements",
Help: "Number of elements in cache in terms of elements count.",
ConstLabels: map[string]string{"type": "trace", "name": cacheName},
}, func() float64 {
return float64(c.Len())
},
)
size := prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "capacity_bytes",
Help: "Cache size in bytes.",
ConstLabels: map[string]string{"type": "trace", "name": cacheName},
}, func() float64 {
return float64(c.SizeBytes())
},
)
capacity := prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "capacity_elements",
Help: "Total cache capacity in terms of elements count.",
ConstLabels: map[string]string{"type": "trace", "name": cacheName},
}, func() float64 {
return float64(c.Cap())
},
)
evictions := prometheus.NewCounterFunc(
prometheus.CounterOpts{
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "evictions_total",
Help: "Total evictions in a clockcache.",
ConstLabels: map[string]string{"type": "trace", "name": cacheName},
}, func() float64 {
return float64(c.Evictions())
},
)
prometheus.MustRegister(enabled, count, size, capacity, evictions)
}
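
As a design note (a sketch under the same assumptions, not code from this
commit): a cache built with WithMax keeps an empty, unregistered perfMetrics
value, so the isApplied guards in Inc and Observe turn metric updates into
no-ops; only WithMetrics registers collectors with Prometheus.

package main

import "github.com/timescale/promscale/pkg/clockcache"

func main() {
	// No collectors are registered here; Get still works, but Inc/Observe
	// short-circuit on isApplied == false.
	plain := clockcache.WithMax(100)
	plain.Get("k")

	// Here the basic and perf series are registered, so the same call updates
	// promscale_cache_queries_total{type="metric", name="series"} and the
	// query_latency_microseconds histogram.
	instrumented := clockcache.WithMetrics("series", "metric", 100)
	instrumented.Get("k")
}
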
pkg/pgclient/metrics.go (120 changes: 15 additions & 105 deletions)
@@ -12,121 +12,29 @@ import (
)

var (
cachedMetricNames prometheus.GaugeFunc
cachedLabels prometheus.GaugeFunc
metricNamesCacheCap prometheus.GaugeFunc
metricNamesCacheEvictions prometheus.CounterFunc
labelsCacheCap prometheus.GaugeFunc
labelsCacheEvictions prometheus.CounterFunc
seriesCacheCap prometheus.GaugeFunc
seriesCacheLen prometheus.GaugeFunc
seriesCacheEvictions prometheus.CounterFunc
statementCacheLen prometheus.Histogram
statementCacheCap = prometheus.NewGauge(
statementCacheLen prometheus.Histogram
statementCacheCap = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "statement_cache_per_connection_capacity",
Subsystem: "cache",
Name: "statement_per_connection_capacity",
Help: "Maximum number of statements in connection pool's statement cache",
},
)
statementCacheEnabled = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "statement_cache_enabled",
Help: "Is the database connection pool's statement cache enabled",
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "enabled",
Help: "Cache is enabled or not.",
ConstLabels: map[string]string{"type": "metric", "name": "statement_cache"},
},
)
)

func InitClientMetrics(client *Client) {
// Only initialize once.
if cachedMetricNames != nil {
return
}

cachedMetricNames = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "metric_name_cache_elements_stored",
Help: "Total number of metric names in the metric name cache.",
}, func() float64 {
return float64(client.NumCachedMetricNames())
})

metricNamesCacheCap = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "metric_name_cache_capacity",
Help: "Maximum number of elements in the metric names cache.",
}, func() float64 {
return float64(client.MetricNamesCacheCapacity())
})

metricNamesCacheEvictions = prometheus.NewCounterFunc(prometheus.CounterOpts{
Namespace: util.PromNamespace,
Name: "metric_name_cache_evictions_total",
Help: "Evictions in the metric names cache.",
}, func() float64 {
return float64(client.metricCache.Evictions())
})

cachedLabels = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "label_cache_elements_stored",
Help: "Total number of label-id to label mappings cache.",
}, func() float64 {
return float64(client.NumCachedLabels())
})

labelsCacheCap = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "label_cache_capacity",
Help: "Total number of label-id to label mappings cache.",
}, func() float64 {
return float64(client.LabelsCacheCapacity())
})

labelsCacheEvictions = prometheus.NewCounterFunc(prometheus.CounterOpts{
Namespace: util.PromNamespace,
Name: "label_cache_evictions_total",
Help: "Total number of evictions in the label-id to label mappings cache.",
}, func() float64 {
return float64(client.labelsCache.Evictions())
})

seriesCacheLen = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "series_cache_elements_stored",
Help: "Total number of series stored in cache",
}, func() float64 {
return float64(client.seriesCache.Len())
})

seriesCacheCap = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: util.PromNamespace,
Name: "series_cache_capacity",
Help: "Total size of series cache.",
}, func() float64 {
return float64(client.seriesCache.Cap())
})

seriesCacheEvictions = prometheus.NewCounterFunc(prometheus.CounterOpts{
Namespace: util.PromNamespace,
Name: "series_cache_evictions_total",
Help: "Total number of series cache evictions.",
}, func() float64 {
return float64(client.seriesCache.Evictions())
})

statementCacheLen = createStatementCacheLengthHistogramMetric(client)
prometheus.MustRegister(
cachedMetricNames,
metricNamesCacheCap,
cachedLabels,
labelsCacheCap,
seriesCacheLen,
seriesCacheCap,
seriesCacheEvictions,
metricNamesCacheEvictions,
labelsCacheEvictions,
statementCacheEnabled,
statementCacheCap,
statementCacheLen,
@@ -154,10 +62,12 @@ func createStatementCacheLengthHistogramMetric(client *Client) prometheus.Histog
histogramStartBucket := math.Pow(histogramBucketFactor, minFactor)
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: util.PromNamespace,
Name: "statement_cache_elements_stored",
Help: "Number of statements in connection pool's statement cache",
Buckets: prometheus.ExponentialBuckets(histogramStartBucket, histogramBucketFactor, totalBuckets),
Namespace: util.PromNamespace,
Subsystem: "cache",
Name: "elements_histogram",
Help: "Number of elements in cache in terms of elements count.",
Buckets: prometheus.ExponentialBuckets(histogramStartBucket, histogramBucketFactor, totalBuckets),
ConstLabels: map[string]string{"type": "metric", "name": "statement_cache"},
},
)
}
pkg/pgmodel/cache/cache.go (4 changes: 2 additions & 2 deletions)
@@ -62,7 +62,7 @@ type MetricNameCache struct {
}

func NewMetricCache(config Config) *MetricNameCache {
return &MetricNameCache{Metrics: clockcache.WithMax(config.MetricsCacheSize)}
return &MetricNameCache{Metrics: clockcache.WithMetrics("metric", "metric", config.MetricsCacheSize)}
}

// Get fetches the table name for specified metric.
@@ -113,5 +113,5 @@ func (m *MetricNameCache) Evictions() uint64 {
}

func NewLabelsCache(config Config) LabelsCache {
return clockcache.WithMax(config.LabelsCacheSize)
return clockcache.WithMetrics("label", "metric", config.LabelsCacheSize)
}
pkg/pgmodel/cache/exemplar_key_cache.go (2 changes: 1 addition & 1 deletion)
@@ -29,7 +29,7 @@ type ExemplarLabelsPosCache struct {
// map[LabelName]LabelPosition. This means that the cache stores positions of each label's value per metric basis,
// which is meant to preserve and reuse _prom_catalog.exemplar_label_position table's 'pos' column.
func NewExemplarLabelsPosCache(config Config) PositionCache {
return &ExemplarLabelsPosCache{cache: clockcache.WithMax(config.ExemplarKeyPosCacheSize)}
return &ExemplarLabelsPosCache{cache: clockcache.WithMetrics("exemplar_labels", "metric", config.ExemplarKeyPosCacheSize)}
}

func (pos *ExemplarLabelsPosCache) GetLabelPositions(metric string) (map[string]int, bool) {
