From 9f859aeaf2302bc5ef674797a43acfcb93604074 Mon Sep 17 00:00:00 2001 From: Ivan Babrou Date: Sat, 1 Jul 2017 21:03:03 -0700 Subject: [PATCH 1/2] Add stats for part counts and sizes This adds stats for number of parts and their sizes per table: ``` clickhouse_clickhouse_table_parts_bytes{database="r0",table="requests_internal_replica"} 5252 clickhouse_clickhouse_table_parts_bytes{database="r1",table="requests_internal_replica"} 5238 clickhouse_clickhouse_table_parts_bytes{database="r2",table="requests_internal_replica"} 5366 clickhouse_clickhouse_table_parts_bytes{database="system",table=".inner.clickhouse_query_stats_by_user"} 3061 clickhouse_clickhouse_table_parts_bytes{database="system",table="query_log"} 70451 clickhouse_clickhouse_table_parts_count{database="r0",table="requests_internal_replica"} 1 clickhouse_clickhouse_table_parts_count{database="r1",table="requests_internal_replica"} 1 clickhouse_clickhouse_table_parts_count{database="r2",table="requests_internal_replica"} 1 clickhouse_clickhouse_table_parts_count{database="system",table=".inner.clickhouse_query_stats_by_user"} 3 clickhouse_clickhouse_table_parts_count{database="system",table="query_log"} 4 ``` --- clickhouse_exporter.go | 102 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 92 insertions(+), 10 deletions(-) diff --git a/clickhouse_exporter.go b/clickhouse_exporter.go index 90d0cac..f6a1c3b 100644 --- a/clickhouse_exporter.go +++ b/clickhouse_exporter.go @@ -34,6 +34,7 @@ type Exporter struct { metricsURI string asyncMetricsURI string eventsURI string + partsURI string mutex sync.RWMutex client *http.Client @@ -58,10 +59,15 @@ func NewExporter(uri url.URL) *Exporter { q.Set("query", "select * from system.events") eventsURI.RawQuery = q.Encode() + partsURI := uri + q.Set("query", "select database, table, sum(bytes) as bytes, count() as parts from system.parts group by database, table") + partsURI.RawQuery = q.Encode() + return &Exporter{ metricsURI: metricsURI.String(), 
asyncMetricsURI: asyncMetricsURI.String(), eventsURI: eventsURI.String(), + partsURI: partsURI.String(), scrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "exporter_scrape_failures_total", @@ -100,8 +106,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { } func (e *Exporter) collect(ch chan<- prometheus.Metric) error { - - metrics, err := e.parseResponse(e.metricsURI) + metrics, err := e.parseKeyValueResponse(e.metricsURI) if err != nil { return fmt.Errorf("Error scraping clickhouse url %v: %v", e.metricsURI, err) } @@ -116,7 +121,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error { newMetric.Collect(ch) } - asyncMetrics, err := e.parseResponse(e.asyncMetricsURI) + asyncMetrics, err := e.parseKeyValueResponse(e.asyncMetricsURI) if err != nil { return fmt.Errorf("Error scraping clickhouse url %v: %v", e.asyncMetricsURI, err) } @@ -131,7 +136,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error { newMetric.Collect(ch) } - events, err := e.parseResponse(e.eventsURI) + events, err := e.parseKeyValueResponse(e.eventsURI) if err != nil { return fmt.Errorf("Error scraping clickhouse url %v: %v", e.eventsURI, err) } @@ -144,15 +149,34 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error { prometheus.CounterValue, float64(ev.value)) ch <- newMetric } - return nil -} -type lineResult struct { - key string - value int + parts, err := e.parsePartsResponse(e.partsURI) + if err != nil { + return fmt.Errorf("Error scraping clickhouse url %v: %v", e.partsURI, err) + } + + for _, part := range parts { + newBytesMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "clickhouse_table_parts_bytes", + Help: "Table size in bytes", + }, []string{"database", "table"}).WithLabelValues(part.database, part.table) + newBytesMetric.Set(float64(part.bytes)) + newBytesMetric.Collect(ch) + + newCountMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: 
namespace, + Name: "clickhouse_table_parts_count", + Help: "Number of parts of the table", + }, []string{"database", "table"}).WithLabelValues(part.database, part.table) + newCountMetric.Set(float64(part.parts)) + newCountMetric.Collect(ch) + } + + return nil } -func (e *Exporter) parseResponse(uri string) ([]lineResult, error) { +func (e *Exporter) response(uri string) ([]byte, error) { resp, err := e.client.Get(uri) if err != nil { return nil, fmt.Errorf("Error scraping clickhouse: %v", err) @@ -167,6 +191,20 @@ func (e *Exporter) parseResponse(uri string) ([]lineResult, error) { return nil, fmt.Errorf("Status %s (%d): %s", resp.Status, resp.StatusCode, data) } + return data, nil +} + +type lineResult struct { + key string + value int +} + +func (e *Exporter) parseKeyValueResponse(uri string) ([]lineResult, error) { + data, err := e.response(uri) + if err != nil { + return nil, err + } + // Parsing results lines := strings.Split(string(data), "\n") var results []lineResult = make([]lineResult, 0) @@ -190,6 +228,50 @@ func (e *Exporter) parseResponse(uri string) ([]lineResult, error) { return results, nil } +type partsResult struct { + database string + table string + bytes int + parts int +} + +func (e *Exporter) parsePartsResponse(uri string) ([]partsResult, error) { + data, err := e.response(uri) + if err != nil { + return nil, err + } + + // Parsing results + lines := strings.Split(string(data), "\n") + var results []partsResult = make([]partsResult, 0) + + for i, line := range lines { + parts := strings.Fields(line) + if len(parts) == 0 { + continue + } + if len(parts) != 4 { + return nil, fmt.Errorf("Unexpected %d line: %s", i, line) + } + database := strings.TrimSpace(parts[0]) + table := strings.TrimSpace(parts[1]) + + bytes, err := strconv.Atoi(strings.TrimSpace(parts[2])) + if err != nil { + return nil, err + } + + count, err := strconv.Atoi(strings.TrimSpace(parts[3])) + if err != nil { + return nil, err + } + + results = append(results, 
partsResult{database, table, bytes, count}) + } + + return results, nil +} + // Collect fetches the stats from configured clickhouse location and delivers them // as Prometheus metrics. It implements prometheus.Collector. func (e *Exporter) Collect(ch chan<- prometheus.Metric) { From b6083981440afceb9f35b64e67f0588ab33205eb Mon Sep 17 00:00:00 2001 From: Yegor Andreenko Date: Mon, 10 Jul 2017 19:19:11 +0200 Subject: [PATCH 2/2] pretty name & parts active condition --- clickhouse_exporter.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clickhouse_exporter.go b/clickhouse_exporter.go index f6a1c3b..ae06c5b 100644 --- a/clickhouse_exporter.go +++ b/clickhouse_exporter.go @@ -60,7 +60,7 @@ func NewExporter(uri url.URL) *Exporter { eventsURI.RawQuery = q.Encode() partsURI := uri - q.Set("query", "select database, table, sum(bytes) as bytes, count() as parts from system.parts group by database, table") + q.Set("query", "select database, table, sum(bytes) as bytes, count() as parts from system.parts where active = 1 group by database, table") partsURI.RawQuery = q.Encode() return &Exporter{ @@ -158,7 +158,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error { for _, part := range parts { newBytesMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, - Name: "clickhouse_table_parts_bytes", + Name: "table_parts_bytes", Help: "Table size in bytes", }, []string{"database", "table"}).WithLabelValues(part.database, part.table) newBytesMetric.Set(float64(part.bytes)) @@ -166,7 +166,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error { newCountMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, - Name: "clickhouse_table_parts_count", + Name: "table_parts_count", Help: "Number of parts of the table", }, []string{"database", "table"}).WithLabelValues(part.database, part.table) newCountMetric.Set(float64(part.parts))