From 3856df0de088f9916fd7df3bf8e0979ce78c2351 Mon Sep 17 00:00:00 2001
From: "James Hughes (Splunk)"
Date: Wed, 10 Jan 2024 18:35:42 -0500
Subject: [PATCH] Redis replica offset (#29565)

**Description:** Adds the missing metric for `slave_repl_offset` and adds an integration test that stands up a Redis cluster to exercise it

**Link to tracking Issue:** [`6942`](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6942)

**Testing:** `cd receiver/redisreceiver && make mod-integration-test`

**Documentation:** (autogenerated from mdatagen)

---
 .chloggen/redis_replica_offset.yaml           |  27 ++
 receiver/redisreceiver/documentation.md       |   8 +
 receiver/redisreceiver/integration_test.go    |  45 ++-
 .../internal/metadata/generated_config.go     |   4 +
 .../metadata/generated_config_test.go         |   2 +
 .../internal/metadata/generated_metrics.go    |  57 ++++
 .../metadata/generated_metrics_test.go        |  15 +
 .../internal/metadata/testdata/config.yaml    |   4 +
 receiver/redisreceiver/metadata.yaml          |   8 +-
 receiver/redisreceiver/metric_functions.go    |   1 +
 receiver/redisreceiver/redis_scraper_test.go  |   4 +-
 .../testdata/integration/Dockerfile.cluster   |  31 ++
 .../testdata/integration/cluster.sh           |  43 +++
 .../testdata/integration/configure-nodes.sh   |  12 +
 .../integration/expected-cluster.yaml         | 292 ++++++++++++++++++
 .../{expected.yaml => expected-old.yaml}      |   0
 .../testdata/integration/redis-cluster.conf   |   5 +
 17 files changed, 554 insertions(+), 4 deletions(-)
 create mode 100755 .chloggen/redis_replica_offset.yaml
 create mode 100644 receiver/redisreceiver/testdata/integration/Dockerfile.cluster
 create mode 100755 receiver/redisreceiver/testdata/integration/cluster.sh
 create mode 100755 receiver/redisreceiver/testdata/integration/configure-nodes.sh
 create mode 100644 receiver/redisreceiver/testdata/integration/expected-cluster.yaml
 rename receiver/redisreceiver/testdata/integration/{expected.yaml => expected-old.yaml} (100%)
 create mode 100644 receiver/redisreceiver/testdata/integration/redis-cluster.conf

diff --git a/.chloggen/redis_replica_offset.yaml b/.chloggen/redis_replica_offset.yaml
new file mode 100755
index 0000000000000..fb552c4ec2058
--- /dev/null
+++ b/.chloggen/redis_replica_offset.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: redisreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: adds a metric for slave_repl_offset
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [6942]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: also adds shell scripts and a Dockerfile to stand up a Redis cluster for the integration test
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/receiver/redisreceiver/documentation.md b/receiver/redisreceiver/documentation.md
index ff610dd2d5db2..ffd41f3c593e8 100644
--- a/receiver/redisreceiver/documentation.md
+++ b/receiver/redisreceiver/documentation.md
@@ -329,6 +329,14 @@ The value of the maxmemory configuration directive
 | ---- | ----------- | ---------- |
 | By | Gauge | Int |
+### redis.replication.replica_offset
+
+Offset for redis replica
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
 ### redis.role
 
 Redis node's role
diff --git a/receiver/redisreceiver/integration_test.go b/receiver/redisreceiver/integration_test.go
index 4d64d6e9eddfb..9dde7ea129d3c 100644
--- a/receiver/redisreceiver/integration_test.go
+++ b/receiver/redisreceiver/integration_test.go
@@ -8,7 +8,9 @@ package redisreceiver
 
 import (
 	"fmt"
+	"path/filepath"
 	"testing"
+	"time"
 
 	"github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/wait"
@@ -20,7 +22,7 @@ import (
 
 const redisPort = "6379"
 
-func TestIntegration(t *testing.T) {
+func TestIntegrationV6(t *testing.T) {
 	scraperinttest.NewIntegrationTest(
 		NewFactory(),
 		scraperinttest.WithContainerRequest(
@@ -46,5 +48,46 @@ func TestIntegration(t *testing.T) {
 					return redisPort
 				}),
 		),
+		scraperinttest.WithExpectedFile(filepath.Join("testdata", "integration", "expected-old.yaml")),
+	).Run(t)
+}
+
+func TestIntegrationV7Cluster(t *testing.T) {
+	scraperinttest.NewIntegrationTest(
+		NewFactory(),
+		scraperinttest.WithContainerRequest(testcontainers.ContainerRequest{
+			ExposedPorts: []string{
+				redisPort,
+				"6380",
+				"6381",
+				"6382",
+				"6383",
+				"6384",
+				"6385",
+			},
+			FromDockerfile: testcontainers.FromDockerfile{
+				Context:    filepath.Join("testdata", "integration"),
+				Dockerfile: "Dockerfile.cluster",
+			},
+			WaitingFor: wait.ForListeningPort("6385").WithStartupTimeout(30 * time.Second),
+		}),
+		scraperinttest.WithCustomConfig(
+			func(t *testing.T, cfg component.Config, ci *scraperinttest.ContainerInfo) {
+				rCfg := cfg.(*Config)
+				// Strictly speaking, the port mapping is non-deterministic and is not guaranteed to reach a node that reports a replica offset.
+				// However, the container's entrypoint script uses socat to forward port 6385 to a replica node,
+				// so in practice failures due to cluster node role changes are unlikely.
+				rCfg.Endpoint = fmt.Sprintf("%s:%s", ci.Host(t), ci.MappedPort(t, "6385"))
+				rCfg.MetricsBuilderConfig.Metrics.RedisReplicationReplicaOffset.Enabled = true
+			}),
+		scraperinttest.WithCompareOptions(
+			pmetrictest.IgnoreMetricValues(),
+			pmetrictest.IgnoreMetricDataPointsOrder(),
+			pmetrictest.IgnoreStartTimestamp(),
+			pmetrictest.IgnoreTimestamp(),
+		),
+		scraperinttest.WithExpectedFile(filepath.Join("testdata", "integration", "expected-cluster.yaml")),
+		scraperinttest.WithCreateContainerTimeout(time.Minute),
+		scraperinttest.WithCompareTimeout(time.Minute),
 	).Run(t)
 }
diff --git a/receiver/redisreceiver/internal/metadata/generated_config.go b/receiver/redisreceiver/internal/metadata/generated_config.go
index 8a860df9d45e3..2222fff52138e 100644
--- a/receiver/redisreceiver/internal/metadata/generated_config.go
+++ b/receiver/redisreceiver/internal/metadata/generated_config.go
@@ -56,6 +56,7 @@ type MetricsConfig struct {
 	RedisRdbChangesSinceLastSave           MetricConfig `mapstructure:"redis.rdb.changes_since_last_save"`
 	RedisReplicationBacklogFirstByteOffset MetricConfig `mapstructure:"redis.replication.backlog_first_byte_offset"`
 	RedisReplicationOffset
MetricConfig `mapstructure:"redis.replication.offset"` + RedisReplicationReplicaOffset MetricConfig `mapstructure:"redis.replication.replica_offset"` RedisRole MetricConfig `mapstructure:"redis.role"` RedisSlavesConnected MetricConfig `mapstructure:"redis.slaves.connected"` RedisUptime MetricConfig `mapstructure:"redis.uptime"` @@ -156,6 +157,9 @@ func DefaultMetricsConfig() MetricsConfig { RedisReplicationOffset: MetricConfig{ Enabled: true, }, + RedisReplicationReplicaOffset: MetricConfig{ + Enabled: false, + }, RedisRole: MetricConfig{ Enabled: false, }, diff --git a/receiver/redisreceiver/internal/metadata/generated_config_test.go b/receiver/redisreceiver/internal/metadata/generated_config_test.go index 1c37d02d5b19a..4e268a0579b53 100644 --- a/receiver/redisreceiver/internal/metadata/generated_config_test.go +++ b/receiver/redisreceiver/internal/metadata/generated_config_test.go @@ -57,6 +57,7 @@ func TestMetricsBuilderConfig(t *testing.T) { RedisRdbChangesSinceLastSave: MetricConfig{Enabled: true}, RedisReplicationBacklogFirstByteOffset: MetricConfig{Enabled: true}, RedisReplicationOffset: MetricConfig{Enabled: true}, + RedisReplicationReplicaOffset: MetricConfig{Enabled: true}, RedisRole: MetricConfig{Enabled: true}, RedisSlavesConnected: MetricConfig{Enabled: true}, RedisUptime: MetricConfig{Enabled: true}, @@ -103,6 +104,7 @@ func TestMetricsBuilderConfig(t *testing.T) { RedisRdbChangesSinceLastSave: MetricConfig{Enabled: false}, RedisReplicationBacklogFirstByteOffset: MetricConfig{Enabled: false}, RedisReplicationOffset: MetricConfig{Enabled: false}, + RedisReplicationReplicaOffset: MetricConfig{Enabled: false}, RedisRole: MetricConfig{Enabled: false}, RedisSlavesConnected: MetricConfig{Enabled: false}, RedisUptime: MetricConfig{Enabled: false}, diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics.go b/receiver/redisreceiver/internal/metadata/generated_metrics.go index 61763d10f40ce..5d66c452df2d1 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics.go @@ -1673,6 +1673,55 @@ func newMetricRedisReplicationOffset(cfg MetricConfig) metricRedisReplicationOff return m } +type metricRedisReplicationReplicaOffset struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills redis.replication.replica_offset metric with initial data. +func (m *metricRedisReplicationReplicaOffset) init() { + m.data.SetName("redis.replication.replica_offset") + m.data.SetDescription("Offset for redis replica") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricRedisReplicationReplicaOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRedisReplicationReplicaOffset) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricRedisReplicationReplicaOffset) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRedisReplicationReplicaOffset(cfg MetricConfig) metricRedisReplicationReplicaOffset { + m := metricRedisReplicationReplicaOffset{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricRedisRole struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1867,6 +1916,7 @@ type MetricsBuilder struct { metricRedisRdbChangesSinceLastSave metricRedisRdbChangesSinceLastSave metricRedisReplicationBacklogFirstByteOffset metricRedisReplicationBacklogFirstByteOffset metricRedisReplicationOffset metricRedisReplicationOffset + metricRedisReplicationReplicaOffset metricRedisReplicationReplicaOffset metricRedisRole metricRedisRole metricRedisSlavesConnected metricRedisSlavesConnected metricRedisUptime metricRedisUptime @@ -1919,6 +1969,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricRedisRdbChangesSinceLastSave: newMetricRedisRdbChangesSinceLastSave(mbc.Metrics.RedisRdbChangesSinceLastSave), metricRedisReplicationBacklogFirstByteOffset: newMetricRedisReplicationBacklogFirstByteOffset(mbc.Metrics.RedisReplicationBacklogFirstByteOffset), metricRedisReplicationOffset: newMetricRedisReplicationOffset(mbc.Metrics.RedisReplicationOffset), + metricRedisReplicationReplicaOffset: newMetricRedisReplicationReplicaOffset(mbc.Metrics.RedisReplicationReplicaOffset), metricRedisRole: newMetricRedisRole(mbc.Metrics.RedisRole), metricRedisSlavesConnected: newMetricRedisSlavesConnected(mbc.Metrics.RedisSlavesConnected), metricRedisUptime: newMetricRedisUptime(mbc.Metrics.RedisUptime), @@ -2014,6 +2065,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricRedisRdbChangesSinceLastSave.emit(ils.Metrics()) mb.metricRedisReplicationBacklogFirstByteOffset.emit(ils.Metrics()) mb.metricRedisReplicationOffset.emit(ils.Metrics()) + mb.metricRedisReplicationReplicaOffset.emit(ils.Metrics()) mb.metricRedisRole.emit(ils.Metrics()) mb.metricRedisSlavesConnected.emit(ils.Metrics()) mb.metricRedisUptime.emit(ils.Metrics()) @@ -2192,6 +2244,11 @@ func (mb *MetricsBuilder) RecordRedisReplicationOffsetDataPoint(ts pcommon.Times mb.metricRedisReplicationOffset.recordDataPoint(mb.startTime, ts, val) } +// RecordRedisReplicationReplicaOffsetDataPoint adds a data point to redis.replication.replica_offset metric. +func (mb *MetricsBuilder) RecordRedisReplicationReplicaOffsetDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricRedisReplicationReplicaOffset.recordDataPoint(mb.startTime, ts, val) +} + // RecordRedisRoleDataPoint adds a data point to redis.role metric. 
func (mb *MetricsBuilder) RecordRedisRoleDataPoint(ts pcommon.Timestamp, val int64, roleAttributeValue AttributeRole) { mb.metricRedisRole.recordDataPoint(mb.startTime, ts, val, roleAttributeValue.String()) diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go index 82d6987947471..ba5c9abfafc34 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go @@ -175,6 +175,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordRedisReplicationOffsetDataPoint(ts, 1) + allMetricsCount++ + mb.RecordRedisReplicationReplicaOffsetDataPoint(ts, 1) + allMetricsCount++ mb.RecordRedisRoleDataPoint(ts, 1, AttributeRoleReplica) @@ -638,6 +641,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "redis.replication.replica_offset": + assert.False(t, validatedMetrics["redis.replication.replica_offset"], "Found a duplicate in the metrics slice: redis.replication.replica_offset") + validatedMetrics["redis.replication.replica_offset"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Offset for redis replica", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "redis.role": assert.False(t, validatedMetrics["redis.role"], "Found a duplicate in the metrics slice: redis.role") validatedMetrics["redis.role"] = true diff --git a/receiver/redisreceiver/internal/metadata/testdata/config.yaml b/receiver/redisreceiver/internal/metadata/testdata/config.yaml index a392e00781541..a66045913a3b1 100644 --- a/receiver/redisreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/redisreceiver/internal/metadata/testdata/config.yaml @@ -63,6 +63,8 @@ all_set: enabled: true redis.replication.offset: enabled: true + redis.replication.replica_offset: + enabled: true redis.role: enabled: true redis.slaves.connected: @@ -140,6 +142,8 @@ none_set: enabled: false redis.replication.offset: enabled: false + redis.replication.replica_offset: + enabled: false redis.role: enabled: false redis.slaves.connected: diff --git a/receiver/redisreceiver/metadata.yaml b/receiver/redisreceiver/metadata.yaml index 94a4d5fc2d851..69153dbc4309e 100644 --- a/receiver/redisreceiver/metadata.yaml +++ b/receiver/redisreceiver/metadata.yaml @@ -344,7 +344,13 @@ metrics: gauge: value_type: int attributes: [db] - + # below are all disabled by default + redis.replication.replica_offset: + enabled: false + description: "Offset for redis replica" + unit: "By" + gauge: + value_type: int tests: config: endpoint: localhost:6379 diff --git a/receiver/redisreceiver/metric_functions.go b/receiver/redisreceiver/metric_functions.go index ab93867ddee0b..96281b8349a71 100644 --- a/receiver/redisreceiver/metric_functions.go +++ b/receiver/redisreceiver/metric_functions.go @@ -30,6 +30,7 @@ func (rs *redisScraper) dataPointRecorders() map[string]any { "rdb_changes_since_last_save": rs.mb.RecordRedisRdbChangesSinceLastSaveDataPoint, "rejected_connections": 
rs.mb.RecordRedisConnectionsRejectedDataPoint,
 		"repl_backlog_first_byte_offset": rs.mb.RecordRedisReplicationBacklogFirstByteOffsetDataPoint,
+		"slave_repl_offset":              rs.mb.RecordRedisReplicationReplicaOffsetDataPoint,
 		"total_commands_processed":       rs.mb.RecordRedisCommandsProcessedDataPoint,
 		"total_connections_received":     rs.mb.RecordRedisConnectionsReceivedDataPoint,
 		"total_net_input_bytes":          rs.mb.RecordRedisNetInputDataPoint,
diff --git a/receiver/redisreceiver/redis_scraper_test.go b/receiver/redisreceiver/redis_scraper_test.go
index 49379ded9cdc6..a7da4f803a681 100644
--- a/receiver/redisreceiver/redis_scraper_test.go
+++ b/receiver/redisreceiver/redis_scraper_test.go
@@ -28,8 +28,8 @@ func TestRedisRunnable(t *testing.T) {
 	md, err := runner.Scrape(context.Background())
 	require.NoError(t, err)
 	// + 6 because there are two keyspace entries each of which has three metrics
-	// -1 because maxmemory is by default disabled, so recorder is there, but there won't be data point
-	assert.Equal(t, len(rs.dataPointRecorders())+6-1, md.DataPointCount())
+	// -2 because maxmemory and slave_repl_offset are disabled by default, so their recorders exist but emit no data points
+	assert.Equal(t, len(rs.dataPointRecorders())+6-2, md.DataPointCount())
 	rm := md.ResourceMetrics().At(0)
 	ilm := rm.ScopeMetrics().At(0)
 	il := ilm.Scope()
diff --git a/receiver/redisreceiver/testdata/integration/Dockerfile.cluster b/receiver/redisreceiver/testdata/integration/Dockerfile.cluster
new file mode 100644
index 0000000000000..28a57b9614263
--- /dev/null
+++ b/receiver/redisreceiver/testdata/integration/Dockerfile.cluster
@@ -0,0 +1,31 @@
+# Use the official Redis image as the base
+FROM redis:7.2.3
+
+RUN apt update && apt install --assume-yes socat
+
+# There appears to be an upstream issue with testcontainers or scraperinttest when using named nodes or clusters, so the cluster is set up manually here
+COPY cluster.sh /usr/local/bin/cluster.sh
+COPY configure-nodes.sh /usr/local/bin/configure-nodes.sh
+COPY redis-cluster.conf /etc/redis-cluster.conf
+
+RUN chown redis:redis /usr/local/bin/cluster.sh
+RUN chmod +x /usr/local/bin/cluster.sh
+
+RUN chown redis:redis /usr/local/bin/configure-nodes.sh
+RUN chmod +x /usr/local/bin/configure-nodes.sh
+
+RUN configure-nodes.sh
+RUN chown redis:redis /etc/redis-cluster*.conf
+
+RUN mkdir -p /var/log/redis
+RUN chgrp redis /var/log/redis
+
+EXPOSE 6379
+EXPOSE 6380
+EXPOSE 6381
+EXPOSE 6382
+EXPOSE 6383
+EXPOSE 6384
+EXPOSE 6385
+ENTRYPOINT ["cluster.sh"]
+CMD ["cluster.sh"]
diff --git a/receiver/redisreceiver/testdata/integration/cluster.sh b/receiver/redisreceiver/testdata/integration/cluster.sh
new file mode 100755
index 0000000000000..84389993b46fe
--- /dev/null
+++ b/receiver/redisreceiver/testdata/integration/cluster.sh
@@ -0,0 +1,43 @@
+#!/bin/sh -eux
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+
+redis-server /etc/redis-cluster-6379.conf && \
+  redis-server /etc/redis-cluster-6380.conf && \
+  redis-server /etc/redis-cluster-6381.conf && \
+  redis-server /etc/redis-cluster-6382.conf && \
+  redis-server /etc/redis-cluster-6383.conf && \
+  redis-server /etc/redis-cluster-6384.conf
+redis-cli -p 6379 ping
+redis-cli -p 6380 ping
+redis-cli -p 6381 ping
+redis-cli -p 6382 ping
+redis-cli -p 6383 ping
+redis-cli -p 6384 ping
+
+redis-cli --cluster create localhost:6379 localhost:6380 localhost:6381 localhost:6382 localhost:6383 localhost:6384 --cluster-replicas 1 --cluster-yes
+sleep 10s
+while true; do
+  if redis-cli -p 6379 cluster info | grep -q "cluster_state:ok" ; then
+    break
+  fi
+  echo "waiting for cluster to be ready"
+  sleep 2
+done
+
+
+# ensure a consistent mapping to a replica on port 6385
+REPLICA_PORT=$(redis-cli -p 6379 cluster nodes | grep 'slave' | awk '{print $2}' | cut -d':' -f2 | head -n 1)
+REPLICA_PORT=${REPLICA_PORT%@*}
+if [ -n "$REPLICA_PORT" ]; then
+  echo "forwarding from port $REPLICA_PORT to 6385"
+  #ssh -fNL "6385:127.0.0.1:$REPLICA_PORT" 127.0.0.1
+  nohup socat tcp-listen:6385,fork,reuseaddr,forever,keepalive,keepidle=10,keepintvl=10,keepcnt=2 tcp-connect:127.0.0.1:"$REPLICA_PORT" &
+else
+  echo "could not find replica port" 1>&2
+  exit 1
+fi
+
+tail -f /dev/null
+#ssh -fNL 6385:localhost:$REPLICA_PORT
diff --git a/receiver/redisreceiver/testdata/integration/configure-nodes.sh b/receiver/redisreceiver/testdata/integration/configure-nodes.sh
new file mode 100755
index 0000000000000..fdab7a3c306d7
--- /dev/null
+++ b/receiver/redisreceiver/testdata/integration/configure-nodes.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+start_port=6379
+end_port=6384
+for port in $(seq $start_port $end_port); do
+  cp /etc/redis-cluster.conf "/etc/redis-cluster-$port.conf"
+  echo "port $port" >> "/etc/redis-cluster-$port.conf"
+  echo "logfile /var/log/redis/redis-server-$port.log" >> "/etc/redis-cluster-$port.conf"
+  echo "cluster-config-file nodes-$port.conf" >> "/etc/redis-cluster-$port.conf"
+done
diff --git a/receiver/redisreceiver/testdata/integration/expected-cluster.yaml b/receiver/redisreceiver/testdata/integration/expected-cluster.yaml
new file mode 100644
index 0000000000000..f4f26a78a286f
--- /dev/null
+++ b/receiver/redisreceiver/testdata/integration/expected-cluster.yaml
@@ -0,0 +1,292 @@
+resourceMetrics:
+  - resource:
+      attributes:
+        - key: redis.version
+          value:
+            stringValue: 7.2.3
+    scopeMetrics:
+      - metrics:
+          - description: Number of clients pending on a blocking call
+            name: redis.clients.blocked
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "0"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{client}'
+          - description: Number of client connections (excluding connections from replicas)
+            name: redis.clients.connected
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "2"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{client}'
+          - description: Biggest input buffer among current client connections
+            gauge:
+              dataPoints:
+                - asInt: "24"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: redis.clients.max_input_buffer
+            unit: By
+          - description: Longest output list among current client connections
+            gauge:
+              dataPoints:
+                - asInt: "0"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: redis.clients.max_output_buffer
+            unit: By
+          - description: Number of commands processed per second
+            gauge:
+              dataPoints:
+                - asInt: "0"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: redis.commands
+            unit: '{ops}/s'
+          - description: Total number of commands processed by the server
+            name: redis.commands.processed
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "16"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+              isMonotonic: true
+            unit: '{command}'
+          - description: Total number of connections accepted by the server
+            name: redis.connections.received
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "4"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+              isMonotonic: true
+            unit: '{connection}'
+          - description: Number of
connections rejected because of maxclients limit + name: redis.connections.rejected + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{connection}' + - description: System CPU consumed by the Redis server in seconds since server start + name: redis.cpu.time + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0.015728 + attributes: + - key: state + value: + stringValue: sys + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: state + value: + stringValue: sys_children + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.015961 + attributes: + - key: state + value: + stringValue: sys_main_thread + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.013353 + attributes: + - key: state + value: + stringValue: user + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: state + value: + stringValue: user_children + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.012769 + attributes: + - key: state + value: + stringValue: user_main_thread + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: s + - description: Number of evicted keys due to maxmemory limit + name: redis.keys.evicted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{key}' + - description: Total number of key expiration events + name: redis.keys.expired + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{event}' + - description: Number of successful lookup of keys in the main dictionary + name: redis.keyspace.hits + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{hit}' + - description: Number of failed lookup of keys in the main dictionary + name: redis.keyspace.misses + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{miss}' + - description: Duration of the latest fork operation in microseconds + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.latest_fork + unit: us + - description: Ratio between used_memory_rss and used_memory + gauge: + dataPoints: + - asDouble: 4.54 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.memory.fragmentation_ratio + unit: "1" + - description: Number of bytes used by the Lua engine + gauge: + dataPoints: + - asInt: "31744" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.memory.lua + unit: By + - description: Peak memory consumed by Redis (in bytes) + gauge: + dataPoints: + - asInt: "1894880" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.memory.peak + unit: By + - description: Number of bytes that Redis allocated as seen by the operating system + gauge: + dataPoints: + - asInt: "8196096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.memory.rss + unit: By + - description: Total number of bytes allocated by Redis using its allocator + gauge: + dataPoints: + - asInt: "1894880" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.memory.used + unit: By + - 
description: The total number of bytes read from the network + name: redis.net.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "877" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: The total number of bytes written to the network + name: redis.net.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13421" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of changes since the last dump + name: redis.rdb.changes_since_last_save + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{change}' + - description: The master offset of the replication backlog buffer + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.replication.backlog_first_byte_offset + unit: By + - description: The server's current replication offset + gauge: + dataPoints: + - asInt: "14" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.replication.offset + unit: By + - description: Offset for redis replica + gauge: + dataPoints: + - asInt: "14" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: redis.replication.replica_offset + unit: By + - description: Number of connected replicas + name: redis.slaves.connected + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{replica}' + - description: Number of seconds since Redis server start + name: redis.uptime + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: s + scope: + name: otelcol/redisreceiver + version: latest diff --git a/receiver/redisreceiver/testdata/integration/expected.yaml b/receiver/redisreceiver/testdata/integration/expected-old.yaml similarity index 100% rename from receiver/redisreceiver/testdata/integration/expected.yaml rename to receiver/redisreceiver/testdata/integration/expected-old.yaml diff --git a/receiver/redisreceiver/testdata/integration/redis-cluster.conf b/receiver/redisreceiver/testdata/integration/redis-cluster.conf new file mode 100644 index 0000000000000..0f214ed362883 --- /dev/null +++ b/receiver/redisreceiver/testdata/integration/redis-cluster.conf @@ -0,0 +1,5 @@ +cluster-enabled yes +cluster-node-timeout 5000 +appendonly no +save "" +daemonize yes
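Usage note: `redis.replication.replica_offset` is disabled by default (see the `enabled: false` defaults in `generated_config.go` and `metadata.yaml` above), so collecting it requires turning it on explicitly. The snippet below is a minimal sketch of how a user might enable it, assuming the receiver's standard per-metric `enabled` option and a placeholder endpoint of `localhost:6379`:

```yaml
receivers:
  redis:
    endpoint: "localhost:6379"
    metrics:
      # Enable the new replica offset gauge (off by default).
      redis.replication.replica_offset:
        enabled: true
```

The value is scraped from the `slave_repl_offset` field of the target's `INFO` output, which is reported by replica nodes, so the configured endpoint should point at a replica; the cluster integration test arranges this by forwarding port 6385 to a replica with socat.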