Added testcases for default metrics and resolved feedback (#6681)
PaurushGarg committed Dec 10, 2021
1 parent 450d5f8 commit 08ed4f5
Showing 4 changed files with 201 additions and 107 deletions.
171 changes: 86 additions & 85 deletions receiver/prometheusreceiver/metrics_receiver_helper_test.go
@@ -18,6 +18,7 @@ import (
"context"
"fmt"
"log"
"math"
"net/http"
"net/http/httptest"
"net/url"
@@ -27,6 +28,7 @@ import (

gokitlog "github.com/go-kit/log"
promcfg "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/scrape"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -110,11 +112,11 @@
)

type testData struct {
name string
pages []mockPrometheusResponse
attributes pdata.AttributeMap
useOpenMetrics bool
validateFunc func(t *testing.T, td *testData, result []*pdata.ResourceMetrics)
name string
pages []mockPrometheusResponse
attributes pdata.AttributeMap
validateScrapes bool
validateFunc func(t *testing.T, td *testData, result []*pdata.ResourceMetrics)
}
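As a usage sketch only (the target name, page payload, and callback below are illustrative, not taken from this commit), an entry now looks like:

```go
// Hypothetical target: one good scrape page and one failed page.
target := &testData{
	name: "target1",
	pages: []mockPrometheusResponse{
		{code: 200, data: "test_gauge0 19\n"},
		{code: 404, data: ""},
	},
	// validateScrapes left false: getValidScrapes then filters out the
	// failed scrape and asserts `up` before validateFunc runs.
	validateFunc: verifyTarget1, // assumed per-test verification callback
}
```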

// setupMockPrometheus to create a mocked prometheus based on targets, returning the server and a prometheus exporting
@@ -124,9 +126,6 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er
endpoints := make(map[string][]mockPrometheusResponse)
metricPaths := make([]string, 0)
for _, t := range tds {
for i := range t.pages {
t.pages[i].useOpenMetrics = t.useOpenMetrics
}
metricPath := fmt.Sprintf("/%s/metrics", t.name)
endpoints[metricPath] = t.pages
metricPaths = append(metricPaths, metricPath)
@@ -137,7 +136,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er
job := make(map[string]interface{})
job["job_name"] = tds[i].name
job["metrics_path"] = metricPaths[i]
job["scrape_interval"] = "1s"
job["scrape_interval"] = "100ms"
job["static_configs"] = []map[string]interface{}{{"targets": []string{u.Host}}}
jobs = append(jobs, job)
}
@@ -158,7 +157,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er
return mp, pCfg, err
}
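For reference, each job map above is loaded through promcfg, so one target ends up with roughly this scrape config; the rendered YAML and target address are illustrative, and the tightened 100ms interval keeps multi-page scrape tests fast:

```go
// Illustrative YAML equivalent of one generated job (u.Host shown as a
// placeholder address).
const exampleJob = `
scrape_configs:
  - job_name: target1
    metrics_path: /target1/metrics
    scrape_interval: 100ms
    static_configs:
      - targets: ["127.0.0.1:40001"]
`
```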

func verifyNumScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) {
func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) {
want := 0
for _, p := range td.pages {
if p.code == 200 {
@@ -197,7 +196,7 @@ func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.Resour
for i := 0; i < len(rms); i++ {
allMetrics := getMetrics(rms[i])
if expectedScrapeMetricCount < len(allMetrics) && countScrapeMetrics(allMetrics) == expectedScrapeMetricCount {
if isFirstFailedScrape(allMetrics) {
if isFirstFailedScrape(t, allMetrics) {
continue
}
assertUp(t, 1, allMetrics)
@@ -209,14 +208,52 @@ func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.Resour
return out
}

func isFirstFailedScrape(metrics []*pdata.Metric) bool {
func isFirstFailedScrape(t *testing.T, metrics []*pdata.Metric) bool {
for _, m := range metrics {
if m.Name() == "up" {
if m.Gauge().DataPoints().At(0).DoubleVal() == 1 { // assumed up will not have multiple datapoints
return false
}
}
}
// TODO: Issue #6376. Remove this skip once OTLP format is directly used in Prometheus Receiver.
if true {
t.Log(`Skipping the datapoint flag check for staleness markers, as the current receiver doesn't yet set the flag true for staleNaNs`)
return true
}

for _, m := range metrics {
switch m.Name() {
case "up", "scrape_duration_seconds", "scrape_samples_scraped", "scrape_samples_post_metric_relabeling", "scrape_series_added":
continue
}
switch m.DataType() {
case pdata.MetricDataTypeGauge:
for i := 0; i < m.Gauge().DataPoints().Len(); i++ {
if !m.Gauge().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
return false
}
}
case pdata.MetricDataTypeSum:
for i := 0; i < m.Sum().DataPoints().Len(); i++ {
if !m.Sum().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
return false
}
}
case pdata.MetricDataTypeHistogram:
for i := 0; i < m.Histogram().DataPoints().Len(); i++ {
if !m.Histogram().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
return false
}
}
case pdata.MetricDataTypeSummary:
for i := 0; i < m.Summary().DataPoints().Len(); i++ {
if !m.Summary().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
return false
}
}
}
}
return true
}
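The TODO above concerns Prometheus staleness markers, which are encoded as a reserved NaN bit pattern. A minimal standalone sketch, not part of this diff, of why bit-level detection is required:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/pkg/value"
)

func main() {
	stale := math.Float64frombits(value.StaleNaN)

	// Both values are NaN, so math.IsNaN alone cannot tell a staleness
	// marker apart from a genuine NaN sample.
	fmt.Println(math.IsNaN(stale), math.IsNaN(math.NaN())) // true true

	// Detection therefore compares the exact bit pattern.
	fmt.Println(value.IsStaleNaN(stale))      // true
	fmt.Println(value.IsStaleNaN(math.NaN())) // false: Go's NaN is NormalNaN
}
```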

@@ -282,10 +319,10 @@ func doCompare(t *testing.T, name string, want pdata.AttributeMap, got *pdata.Re
assert.Equal(t, expectedScrapeMetricCount, countScrapeMetricsRM(got))
assert.Equal(t, want.Len(), got.Resource().Attributes().Len())
for k, v := range want.AsRaw() {
value, ok := got.Resource().Attributes().Get(k)
val, ok := got.Resource().Attributes().Get(k)
assert.True(t, ok, "%q attribute is missing", k)
if ok {
assert.EqualValues(t, v, value.AsString())
assert.EqualValues(t, v, val.AsString())
}
}
for _, e := range expectations {
@@ -355,11 +392,11 @@ func compareAttributes(attributes map[string]string) numberPointComparator {
req := assert.Equal(t, len(attributes), numberDataPoint.Attributes().Len(), "Attributes length do not match")
if req {
for k, v := range attributes {
value, ok := numberDataPoint.Attributes().Get(k)
val, ok := numberDataPoint.Attributes().Get(k)
if ok {
assert.Equal(t, v, value.AsString(), "Attributes do not match")
assert.Equal(t, v, val.AsString(), "Attributes do not match")
} else {
assert.Fail(t, "Attributes key do not match")
assert.Failf(t, "Attributes key does not match: %v", k)
}
}
}
@@ -371,11 +408,11 @@ func compareSummaryAttributes(attributes map[string]string) summaryPointComparat
req := assert.Equal(t, len(attributes), summaryDataPoint.Attributes().Len(), "Summary attributes length do not match")
if req {
for k, v := range attributes {
value, ok := summaryDataPoint.Attributes().Get(k)
val, ok := summaryDataPoint.Attributes().Get(k)
if ok {
assert.Equal(t, v, value.AsString(), "Summary attributes value do not match")
assert.Equal(t, v, val.AsString(), "Summary attributes value do not match")
} else {
assert.Fail(t, "Summary attributes key do not match")
assert.Failf(t, "Summary attributes key does not match: %v", k)
}
}
}
@@ -387,9 +424,9 @@ func compareHistogramAttributes(attributes map[string]string) histogramPointComp
req := assert.Equal(t, len(attributes), histogramDataPoint.Attributes().Len(), "Histogram attributes length do not match")
if req {
for k, v := range attributes {
value, ok := histogramDataPoint.Attributes().Get(k)
val, ok := histogramDataPoint.Attributes().Get(k)
if ok {
assert.Equal(t, v, value.AsString(), "Histogram attributes value do not match")
assert.Equal(t, v, val.AsString(), "Histogram attributes value do not match")
} else {
assert.Fail(t, "Histogram attributes key do not match")
}
@@ -455,90 +492,54 @@ func compareSummary(count uint64, sum float64, quantiles [][]float64) summaryPoi
req := assert.Equal(t, len(quantiles), summaryDataPoint.QuantileValues().Len())
if req {
for i := 0; i < summaryDataPoint.QuantileValues().Len(); i++ {
assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(), "Summary quantile do not match")
assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(), "Summary quantile values do not match")
assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(),
"Summary quantile do not match")
if math.IsNaN(quantiles[i][1]) {
assert.True(t, math.Float64bits(summaryDataPoint.QuantileValues().At(i).Value()) == value.NormalNaN,
"Summary quantile value is not normalNaN as expected")
} else {
assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(),
"Summary quantile values do not match")
}
}
}
}
}
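The Float64bits comparison above is necessary because NaN never compares equal to itself, so assert.Equal on an expected-NaN quantile would always fail. A hypothetical helper (reusing this file's imports) capturing the pattern:

```go
// assertNormalNaN is a sketch, not part of this commit: expected-NaN
// values must be compared by bit pattern, since nan != nan in Go.
func assertNormalNaN(t *testing.T, got float64) {
	assert.True(t, math.Float64bits(got) == value.NormalNaN,
		"expected a normal NaN, got bits %#x", math.Float64bits(got))
}
```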

func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string) {
// starts prometheus receiver with custom config, retrieves metrics from MetricsSink
func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string, cfgMuts ...func(*promcfg.Config)) {
for _, pdataDirect := range []bool{false, true} {
pipelineType := "OpenCensus"
if pdataDirect {
pipelineType = "pdata"
}
t.Run(pipelineType, func(t *testing.T) { // 1. setup mock server
t.Run(pipelineType, func(t *testing.T) {
ctx := context.Background()
mp, cfg, err := setupMockPrometheus(targets...)
for _, cfgMut := range cfgMuts {
cfgMut(cfg)
}
require.Nilf(t, err, "Failed to create Prometheus config: %v", err)
defer mp.Close()

cms := new(consumertest.MetricsSink)
rcvr := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{
receiver := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{
ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)),
PrometheusConfig: cfg,
UseStartTimeMetric: useStartTimeMetric,
StartTimeMetricRegex: startTimeMetricRegex,
pdataDirect: pdataDirect,
}, cms)

require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost()), "Failed to invoke Start: %v", err)
t.Cleanup(func() {
// verify state after shutdown is called
assert.Lenf(t, flattenTargets(rcvr.scrapeManager.TargetsAll()), len(targets), "expected %v targets to be running", len(targets))
require.NoError(t, rcvr.Shutdown(context.Background()))
assert.Len(t, flattenTargets(rcvr.scrapeManager.TargetsAll()), 0, "expected scrape manager to have no targets")
})

// wait for all provided data to be scraped
mp.wg.Wait()
metrics := cms.AllMetrics()

// split and store results by target name
pResults := splitMetricsByTarget(metrics)
lres, lep := len(pResults), len(mp.endpoints)
assert.Equalf(t, lep, lres, "want %d targets, but got %v\n", lep, lres)

// loop to validate outputs for each target
for _, target := range targets {
t.Run(target.name, func(t *testing.T) {
validScrapes := pResults[target.name]
if !target.useOpenMetrics {
validScrapes = getValidScrapes(t, pResults[target.name])
}
target.validateFunc(t, target, validScrapes)
})
}
})
}
}

// starts prometheus receiver with custom config, retrieves metrics from MetricsSink
func testComponentCustomConfig(t *testing.T, targets []*testData, cfgMut func(*promcfg.Config)) {
for _, pdataDirect := range []bool{false, true} {
pipelineType := "OpenCensus"
if pdataDirect {
pipelineType = "pdata"
}
t.Run(pipelineType, func(t *testing.T) {
ctx := context.Background()
mp, cfg, err := setupMockPrometheus(targets...)
cfgMut(cfg)
require.Nilf(t, err, "Failed to create Prometheus config: %v", err)
defer mp.Close()

cms := new(consumertest.MetricsSink)
receiver := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{
ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)),
PrometheusConfig: cfg,
pdataDirect: pdataDirect,
}, cms)

require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost()))

// verify state after shutdown is called
t.Cleanup(func() { require.NoError(t, receiver.Shutdown(ctx)) })

t.Cleanup(func() {
// verify state after shutdown is called
assert.Lenf(t, flattenTargets(receiver.scrapeManager.TargetsAll()), len(targets), "expected %v targets to be running", len(targets))
require.NoError(t, receiver.Shutdown(context.Background()))
assert.Len(t, flattenTargets(receiver.scrapeManager.TargetsAll()), 0, "expected scrape manager to have no targets")
})
// wait for all provided data to be scraped
mp.wg.Wait()
metrics := cms.AllMetrics()
@@ -551,11 +552,11 @@ func testComponentCustomConfig(t *testing.T, targets []*testData, cfgMut func(*p
// loop to validate outputs for each target
for _, target := range targets {
t.Run(target.name, func(t *testing.T) {
validScrapes := pResults[target.name]
if !target.useOpenMetrics {
validScrapes = getValidScrapes(t, pResults[target.name])
scrapes := pResults[target.name]
if !target.validateScrapes {
scrapes = getValidScrapes(t, pResults[target.name])
}
target.validateFunc(t, target, validScrapes)
target.validateFunc(t, target, scrapes)
})
}
})
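With testComponentCustomConfig folded into testComponent, the variadic cfgMuts make the custom-config path opt-in. A sketch of both call styles (the targets and limit value are illustrative):

```go
// Existing callers are unchanged:
testComponent(t, targets, false, "")

// Former testComponentCustomConfig callers pass a config mutator instead:
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
	for _, scrapeCfg := range cfg.ScrapeConfigs {
		scrapeCfg.LabelLimit = 5
	}
})
```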
16 changes: 8 additions & 8 deletions receiver/prometheusreceiver/metrics_receiver_labels_test.go
@@ -39,13 +39,13 @@ func TestExternalLabels(t *testing.T) {
},
}

testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) {
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
cfg.GlobalConfig.ExternalLabels = labels.FromStrings("key", "value")
})
}

func verifyExternalLabels(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) {
verifyNumScrapeResults(t, td, rms)
verifyNumValidScrapeResults(t, td, rms)
require.Greater(t, len(rms), 0, "At least one resource metric should be present")

wantAttributes := td.attributes
@@ -74,7 +74,7 @@ test_gauge0{label1="value1",label2="value2"} 10

func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) {
// each sample in the scraped metrics is within the configured label_limit, scrape should be successful
verifyNumScrapeResults(t, td, rms)
verifyNumValidScrapeResults(t, td, rms)
require.Greater(t, len(rms), 0, "At least one resource metric should be present")

want := td.attributes
@@ -129,7 +129,7 @@ func TestLabelLimitConfig(t *testing.T) {
},
}

testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) {
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
// set label limit in scrape_config
for _, scrapeCfg := range cfg.ScrapeConfigs {
scrapeCfg.LabelLimit = 5
@@ -165,7 +165,7 @@ test_summary0_count{label1="value1",label2="value2"} 1000
`

func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) {
verifyNumScrapeResults(t, td, rms)
verifyNumValidScrapeResults(t, td, rms)
require.Greater(t, len(rms), 0, "At least one resource metric should be present")

want := td.attributes
@@ -252,7 +252,7 @@ func TestLabelNameLimitConfig(t *testing.T) {
},
}

testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) {
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
// set label name length limit in scrape_config
for _, scrapeCfg := range cfg.ScrapeConfigs {
scrapeCfg.LabelNameLengthLimit = 20
@@ -288,7 +288,7 @@ func TestLabelValueLimitConfig(t *testing.T) {
},
}

testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) {
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
// set label value length limit in scrape_config
for _, scrapeCfg := range cfg.ScrapeConfigs {
scrapeCfg.LabelValueLengthLimit = 25
@@ -550,7 +550,7 @@ func TestHonorLabelsTrueConfig(t *testing.T) {
},
}

testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) {
testComponent(t, targets, false, "", func(cfg *promcfg.Config) {
// set honor_labels in scrape_config
for _, scrapeCfg := range cfg.ScrapeConfigs {
scrapeCfg.HonorLabels = true
