From 08ed4f559c54c06a9189246b1ac634fa6a1d352a Mon Sep 17 00:00:00 2001 From: Paurush Garg <62579325+PaurushGarg@users.noreply.github.com> Date: Fri, 10 Dec 2021 13:13:36 -0500 Subject: [PATCH] Added testcases for default metrics and resolved feedback (#6681) --- .../metrics_receiver_helper_test.go | 171 +++++++++--------- .../metrics_receiver_labels_test.go | 16 +- .../metrics_receiver_open_metrics_test.go | 12 +- .../metrics_receiver_test.go | 109 ++++++++++- 4 files changed, 201 insertions(+), 107 deletions(-) diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index 117981119c44e..00d8740a9f82e 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "log" + "math" "net/http" "net/http/httptest" "net/url" @@ -27,6 +28,7 @@ import ( gokitlog "github.com/go-kit/log" promcfg "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/pkg/value" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -110,11 +112,11 @@ var ( ) type testData struct { - name string - pages []mockPrometheusResponse - attributes pdata.AttributeMap - useOpenMetrics bool - validateFunc func(t *testing.T, td *testData, result []*pdata.ResourceMetrics) + name string + pages []mockPrometheusResponse + attributes pdata.AttributeMap + validateScrapes bool + validateFunc func(t *testing.T, td *testData, result []*pdata.ResourceMetrics) } // setupMockPrometheus to create a mocked prometheus based on targets, returning the server and a prometheus exporting @@ -124,9 +126,6 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er endpoints := make(map[string][]mockPrometheusResponse) metricPaths := make([]string, 0) for _, t := range tds { - for i := range t.pages { - t.pages[i].useOpenMetrics = t.useOpenMetrics - } metricPath := fmt.Sprintf("/%s/metrics", t.name) endpoints[metricPath] = t.pages metricPaths = append(metricPaths, metricPath) @@ -137,7 +136,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er job := make(map[string]interface{}) job["job_name"] = tds[i].name job["metrics_path"] = metricPaths[i] - job["scrape_interval"] = "1s" + job["scrape_interval"] = "100ms" job["static_configs"] = []map[string]interface{}{{"targets": []string{u.Host}}} jobs = append(jobs, job) } @@ -158,7 +157,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er return mp, pCfg, err } -func verifyNumScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { want := 0 for _, p := range td.pages { if p.code == 200 { @@ -197,7 +196,7 @@ func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.Resour for i := 0; i < len(rms); i++ { allMetrics := getMetrics(rms[i]) if expectedScrapeMetricCount < len(allMetrics) && countScrapeMetrics(allMetrics) == expectedScrapeMetricCount { - if isFirstFailedScrape(allMetrics) { + if isFirstFailedScrape(t, allMetrics) { continue } assertUp(t, 1, allMetrics) @@ -209,7 +208,7 @@ func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.Resour return out } -func isFirstFailedScrape(metrics []*pdata.Metric) bool { +func isFirstFailedScrape(t *testing.T, metrics 
[]*pdata.Metric) bool {
 	for _, m := range metrics {
 		if m.Name() == "up" {
 			if m.Gauge().DataPoints().At(0).DoubleVal() == 1 { // assumed up will not have multiple datapoints
@@ -217,6 +216,44 @@ func isFirstFailedScrape(metrics []*pdata.Metric) bool {
 				return true
 			}
 		}
 	}
+	// TODO: Issue #6376. Remove this skip once OTLP format is directly used in Prometheus Receiver.
+	if true {
+		t.Log(`Skipping the datapoint flag check for staleness markers, as the current receiver doesn't yet set the flag true for staleNaNs`)
+		return true
+	}
+
+	for _, m := range metrics {
+		switch m.Name() {
+		case "up", "scrape_duration_seconds", "scrape_samples_scraped", "scrape_samples_post_metric_relabeling", "scrape_series_added":
+			continue
+		}
+		switch m.DataType() {
+		case pdata.MetricDataTypeGauge:
+			for i := 0; i < m.Gauge().DataPoints().Len(); i++ {
+				if !m.Gauge().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
+					return false
+				}
+			}
+		case pdata.MetricDataTypeSum:
+			for i := 0; i < m.Sum().DataPoints().Len(); i++ {
+				if !m.Sum().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
+					return false
+				}
+			}
+		case pdata.MetricDataTypeHistogram:
+			for i := 0; i < m.Histogram().DataPoints().Len(); i++ {
+				if !m.Histogram().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
+					return false
+				}
+			}
+		case pdata.MetricDataTypeSummary:
+			for i := 0; i < m.Summary().DataPoints().Len(); i++ {
+				if !m.Summary().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) {
+					return false
+				}
+			}
+		}
+	}
 	return true
 }
@@ -282,10 +319,10 @@ func doCompare(t *testing.T, name string, want pdata.AttributeMap, got *pdata.Re
 	assert.Equal(t, expectedScrapeMetricCount, countScrapeMetricsRM(got))
 	assert.Equal(t, want.Len(), got.Resource().Attributes().Len())
 	for k, v := range want.AsRaw() {
-		value, ok := got.Resource().Attributes().Get(k)
+		val, ok := got.Resource().Attributes().Get(k)
 		assert.True(t, ok, "%q attribute is missing", k)
 		if ok {
-			assert.EqualValues(t, v, value.AsString())
+			assert.EqualValues(t, v, val.AsString())
 		}
 	}
 	for _, e := range expectations {
@@ -355,11 +392,11 @@ func compareAttributes(attributes map[string]string) numberPointComparator {
 		req := assert.Equal(t, len(attributes), numberDataPoint.Attributes().Len(), "Attributes length do not match")
 		if req {
 			for k, v := range attributes {
-				value, ok := numberDataPoint.Attributes().Get(k)
+				val, ok := numberDataPoint.Attributes().Get(k)
 				if ok {
-					assert.Equal(t, v, value.AsString(), "Attributes do not match")
+					assert.Equal(t, v, val.AsString(), "Attributes do not match")
 				} else {
-					assert.Fail(t, "Attributes key do not match")
+					assert.Failf(t, "Attributes key does not match", "%v", k)
 				}
 			}
 		}
@@ -371,11 +408,11 @@ func compareSummaryAttributes(attributes map[string]string) summaryPointComparat
 		req := assert.Equal(t, len(attributes), summaryDataPoint.Attributes().Len(), "Summary attributes length do not match")
 		if req {
 			for k, v := range attributes {
-				value, ok := summaryDataPoint.Attributes().Get(k)
+				val, ok := summaryDataPoint.Attributes().Get(k)
 				if ok {
-					assert.Equal(t, v, value.AsString(), "Summary attributes value do not match")
+					assert.Equal(t, v, val.AsString(), "Summary attributes value do not match")
 				} else {
-					assert.Fail(t, "Summary attributes key do not match")
+					assert.Failf(t, "Summary attributes key does not match", "%v", k)
 				}
 			}
 		}
@@ -387,9 +424,9 @@ func compareHistogramAttributes(attributes map[string]string) histogramPointComp
 		req := assert.Equal(t, len(attributes), histogramDataPoint.Attributes().Len(), "Histogram attributes length do not match")
 		if req {
 			for k, v := range attributes {
-				value, ok := histogramDataPoint.Attributes().Get(k)
+				val, ok := histogramDataPoint.Attributes().Get(k)
 				if ok {
-					assert.Equal(t, v, value.AsString(), "Histogram attributes value do not match")
+					assert.Equal(t, v, val.AsString(), "Histogram attributes value do not match")
 				} else {
 					assert.Fail(t, "Histogram attributes key do not match")
 				}
@@ -455,26 +492,38 @@ func compareSummary(count uint64, sum float64, quantiles [][]float64) summaryPoi
 		req := assert.Equal(t, len(quantiles), summaryDataPoint.QuantileValues().Len())
 		if req {
 			for i := 0; i < summaryDataPoint.QuantileValues().Len(); i++ {
-				assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(), "Summary quantile do not match")
-				assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(), "Summary quantile values do not match")
+				assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(),
+					"Summary quantiles do not match")
+				if math.IsNaN(quantiles[i][1]) {
+					assert.True(t, math.Float64bits(summaryDataPoint.QuantileValues().At(i).Value()) == value.NormalNaN,
+						"Summary quantile value is not NormalNaN as expected")
+				} else {
+					assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(),
+						"Summary quantile values do not match")
+				}
 			}
 		}
 	}
 }
 
-func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string) {
+// testComponent starts a prometheus receiver with a custom config, then retrieves the scraped metrics from the MetricsSink
+func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string, cfgMuts ...func(*promcfg.Config)) {
 	for _, pdataDirect := range []bool{false, true} {
 		pipelineType := "OpenCensus"
 		if pdataDirect {
 			pipelineType = "pdata"
 		}
-		t.Run(pipelineType, func(t *testing.T) { // 1. setup mock server
+		t.Run(pipelineType, func(t *testing.T) {
+			ctx := context.Background()
 			mp, cfg, err := setupMockPrometheus(targets...)
+ for _, cfgMut := range cfgMuts { + cfgMut(cfg) + } require.Nilf(t, err, "Failed to create Prometheus config: %v", err) defer mp.Close() cms := new(consumertest.MetricsSink) - rcvr := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{ + receiver := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{ ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)), PrometheusConfig: cfg, UseStartTimeMetric: useStartTimeMetric, @@ -482,63 +531,15 @@ func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, s pdataDirect: pdataDirect, }, cms) - require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost()), "Failed to invoke Start: %v", err) - t.Cleanup(func() { - // verify state after shutdown is called - assert.Lenf(t, flattenTargets(rcvr.scrapeManager.TargetsAll()), len(targets), "expected %v targets to be running", len(targets)) - require.NoError(t, rcvr.Shutdown(context.Background())) - assert.Len(t, flattenTargets(rcvr.scrapeManager.TargetsAll()), 0, "expected scrape manager to have no targets") - }) - - // wait for all provided data to be scraped - mp.wg.Wait() - metrics := cms.AllMetrics() - - // split and store results by target name - pResults := splitMetricsByTarget(metrics) - lres, lep := len(pResults), len(mp.endpoints) - assert.Equalf(t, lep, lres, "want %d targets, but got %v\n", lep, lres) - - // loop to validate outputs for each targets - for _, target := range targets { - t.Run(target.name, func(t *testing.T) { - validScrapes := pResults[target.name] - if !target.useOpenMetrics { - validScrapes = getValidScrapes(t, pResults[target.name]) - } - target.validateFunc(t, target, validScrapes) - }) - } - }) - } -} - -// starts prometheus receiver with custom config, retrieves metrics from MetricsSink -func testComponentCustomConfig(t *testing.T, targets []*testData, cfgMut func(*promcfg.Config)) { - for _, pdataDirect := range []bool{false, true} { - pipelineType := "OpenCensus" - if pdataDirect { - pipelineType = "pdata" - } - t.Run(pipelineType, func(t *testing.T) { - ctx := context.Background() - mp, cfg, err := setupMockPrometheus(targets...) 
- cfgMut(cfg) - require.Nilf(t, err, "Failed to create Prometheus config: %v", err) - defer mp.Close() - - cms := new(consumertest.MetricsSink) - receiver := newPrometheusReceiver(componenttest.NewNopReceiverCreateSettings(), &Config{ - ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)), - PrometheusConfig: cfg, - pdataDirect: pdataDirect, - }, cms) - require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost())) // verify state after shutdown is called - t.Cleanup(func() { require.NoError(t, receiver.Shutdown(ctx)) }) - + t.Cleanup(func() { + // verify state after shutdown is called + assert.Lenf(t, flattenTargets(receiver.scrapeManager.TargetsAll()), len(targets), "expected %v targets to be running", len(targets)) + require.NoError(t, receiver.Shutdown(context.Background())) + assert.Len(t, flattenTargets(receiver.scrapeManager.TargetsAll()), 0, "expected scrape manager to have no targets") + }) // wait for all provided data to be scraped mp.wg.Wait() metrics := cms.AllMetrics() @@ -551,11 +552,11 @@ func testComponentCustomConfig(t *testing.T, targets []*testData, cfgMut func(*p // loop to validate outputs for each targets for _, target := range targets { t.Run(target.name, func(t *testing.T) { - validScrapes := pResults[target.name] - if !target.useOpenMetrics { - validScrapes = getValidScrapes(t, pResults[target.name]) + scrapes := pResults[target.name] + if !target.validateScrapes { + scrapes = getValidScrapes(t, pResults[target.name]) } - target.validateFunc(t, target, validScrapes) + target.validateFunc(t, target, scrapes) }) } }) diff --git a/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/receiver/prometheusreceiver/metrics_receiver_labels_test.go index 4ff8671cec0b2..f225b7548dc73 100644 --- a/receiver/prometheusreceiver/metrics_receiver_labels_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -39,13 +39,13 @@ func TestExternalLabels(t *testing.T) { }, } - testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) { + testComponent(t, targets, false, "", func(cfg *promcfg.Config) { cfg.GlobalConfig.ExternalLabels = labels.FromStrings("key", "value") }) } func verifyExternalLabels(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, rms) + verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") wantAttributes := td.attributes @@ -74,7 +74,7 @@ test_gauge0{label1="value1",label2="value2"} 10 func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { //each sample in the scraped metrics is within the configured label_limit, scrape should be successful - verifyNumScrapeResults(t, td, rms) + verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") want := td.attributes @@ -129,7 +129,7 @@ func TestLabelLimitConfig(t *testing.T) { }, } - testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) { + testComponent(t, targets, false, "", func(cfg *promcfg.Config) { // set label limit in scrape_config for _, scrapeCfg := range cfg.ScrapeConfigs { scrapeCfg.LabelLimit = 5 @@ -165,7 +165,7 @@ test_summary0_count{label1="value1",label2="value2"} 1000 ` func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, rms) + verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") want := 
td.attributes @@ -252,7 +252,7 @@ func TestLabelNameLimitConfig(t *testing.T) { }, } - testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) { + testComponent(t, targets, false, "", func(cfg *promcfg.Config) { // set label limit in scrape_config for _, scrapeCfg := range cfg.ScrapeConfigs { scrapeCfg.LabelNameLengthLimit = 20 @@ -288,7 +288,7 @@ func TestLabelValueLimitConfig(t *testing.T) { }, } - testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) { + testComponent(t, targets, false, "", func(cfg *promcfg.Config) { // set label name limit in scrape_config for _, scrapeCfg := range cfg.ScrapeConfigs { scrapeCfg.LabelValueLengthLimit = 25 @@ -550,7 +550,7 @@ func TestHonorLabelsTrueConfig(t *testing.T) { }, } - testComponentCustomConfig(t, targets, func(cfg *promcfg.Config) { + testComponent(t, targets, false, "", func(cfg *promcfg.Config) { // set label name limit in scrape_config for _, scrapeCfg := range cfg.ScrapeConfigs { scrapeCfg.HonorLabels = true diff --git a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go index 5ec7f5dc777b5..72f5efb1f5bbc 100644 --- a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go @@ -66,10 +66,10 @@ func TestOpenMetricsPositive(t *testing.T) { testData := &testData{ name: k, pages: []mockPrometheusResponse{ - {code: 200, data: v}, + {code: 200, data: v, useOpenMetrics: true}, }, - validateFunc: verifyPositiveTarget, - useOpenMetrics: true, + validateFunc: verifyPositiveTarget, + validateScrapes: true, } targets = append(targets, testData) } @@ -98,10 +98,10 @@ func TestOpenMetricsNegative(t *testing.T) { testData := &testData{ name: k, pages: []mockPrometheusResponse{ - {code: 200, data: v}, + {code: 200, data: v, useOpenMetrics: true}, }, - validateFunc: verifyNegativeTarget, - useOpenMetrics: true, + validateFunc: verifyNegativeTarget, + validateScrapes: true, } targets = append(targets, testData) } diff --git a/receiver/prometheusreceiver/metrics_receiver_test.go b/receiver/prometheusreceiver/metrics_receiver_test.go index 00eb90ddf7d17..3fb3ab079567a 100644 --- a/receiver/prometheusreceiver/metrics_receiver_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_test.go @@ -18,7 +18,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/model/pdata" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -84,9 +83,38 @@ rpc_duration_seconds_sum 5002 rpc_duration_seconds_count 1001 ` +// target1Page3 has lower values than previous scrape. +// So, even after seeing a failed scrape, start_timestamp should be reset for target1Page3 +var target1Page3 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 99 +http_requests_total{method="post",code="400"} 3 + +# HELP http_request_duration_seconds A histogram of the request duration. 
+# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 900 +http_request_duration_seconds_bucket{le="0.5"} 1400 +http_request_duration_seconds_bucket{le="1"} 1900 +http_request_duration_seconds_bucket{le="+Inf"} 2400 +http_request_duration_seconds_sum 4900 +http_request_duration_seconds_count 2400 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 4 +rpc_duration_seconds{quantile="0.99"} 6 +rpc_duration_seconds_sum 4900 +rpc_duration_seconds_count 900 +` + func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, resourceMetrics) - require.Greater(t, len(resourceMetrics), 0, "At least one resource metric should be present") + verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 4 metrics + 5 internal scraper metrics @@ -215,6 +243,71 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }), } doCompare(t, "scrape2", wantAttributes, m2, e2) + + m3 := resourceMetrics[2] + // m3 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m3)) + metricsScrape3 := m3.InstrumentationLibraryMetrics().At(0).Metrics() + ts3 := getTS(metricsScrape3) + e3 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pdata.MetricDataTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts3), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pdata.MetricDataTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + //compareStartTimestamp(ts3), + compareTimestamp(ts3), + compareDoubleValue(99), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + //compareStartTimestamp(ts3), + compareTimestamp(ts3), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pdata.MetricDataTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + //compareHistogramStartTimestamp(ts3), + compareHistogramTimestamp(ts3), + compareHistogram(2400, 4900, []uint64{900, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pdata.MetricDataTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + //compareSummaryStartTimestamp(ts3), + compareSummaryTimestamp(ts3), + compareSummary(900, 4900, [][]float64{{0.01, 1}, {0.9, 4}, {0.99, 6}}), + }, + }, + }), + } + doCompare(t, "scrape3", wantAttributes, m3, e3) } // target2 is going to have 5 pages, and there's a newly added item on the 2nd page. 
@@ -280,8 +373,7 @@ http_requests_total{method="post",code="500"} 5 ` func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, resourceMetrics) - require.Greater(t, len(resourceMetrics), 0, "At least one resource metric should be present") + verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 2 metrics + 5 internal scraper metrics assert.Equal(t, 7, metricsCount(m1)) @@ -590,8 +682,7 @@ rpc_duration_seconds_count{foo="no_quantile"} 55 ` func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, resourceMetrics) - require.Greater(t, len(resourceMetrics), 0, "At least one resource metric should be present") + verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 3 metrics + 5 internal scraper metrics assert.Equal(t, 8, metricsCount(m1)) @@ -709,6 +800,8 @@ func TestCoreMetricsEndToEnd(t *testing.T) { {code: 200, data: target1Page1}, {code: 500, data: ""}, {code: 200, data: target1Page2}, + {code: 500, data: ""}, + {code: 200, data: target1Page3}, }, validateFunc: verifyTarget1, }, @@ -771,7 +864,7 @@ var startTimeMetricPageStartTimestamp = ×tamppb.Timestamp{Seconds: 400, Nan const numStartTimeMetricPageTimeseries = 11 func verifyStartTimeMetricPage(t *testing.T, td *testData, result []*pdata.ResourceMetrics) { - verifyNumScrapeResults(t, td, result) + verifyNumValidScrapeResults(t, td, result) numTimeseries := 0 for _, rm := range result { metrics := getMetrics(rm)