Added disk merged metric for Linux & updated tests (open-telemetry#1267)
james-bebbington authored and wyTrivail committed Jul 13, 2020
1 parent 926a151 commit 74423a0
Showing 6 changed files with 142 additions and 34 deletions.
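
The central testability change in this commit is that the scraper no longer calls gopsutil's disk.IOCounters directly; it stores the function in a struct field so tests can substitute a stub. Below is a minimal, standalone sketch of that pattern, illustrative only -- the type and function names (diskStats, newDiskStats) are not the collector's.

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/disk"
)

// diskStats mirrors the shape of the disk scraper: the gopsutil call is
// held as a function field rather than invoked directly, so a test can
// swap it out.
type diskStats struct {
	ioCounters func(names ...string) (map[string]disk.IOCountersStat, error)
}

func newDiskStats() *diskStats {
	// Production path: use the real gopsutil function.
	return &diskStats{ioCounters: disk.IOCounters}
}

func main() {
	s := newDiskStats()

	// A test would overwrite the field with canned data (or an error),
	// much as the updated disk scraper test does.
	s.ioCounters = func(names ...string) (map[string]disk.IOCountersStat, error) {
		return map[string]disk.IOCountersStat{
			"sda": {MergedReadCount: 12, MergedWriteCount: 34},
		}, nil
	}

	counters, err := s.ioCounters()
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	for device, c := range counters {
		fmt.Printf("%s: merged reads=%d merged writes=%d\n", device, c.MergedReadCount, c.MergedWriteCount)
	}
}
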
2 changes: 1 addition & 1 deletion receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
@@ -67,7 +67,7 @@ var resourceMetrics = []string{
}

var systemSpecificMetrics = map[string][]string{
"linux": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"},
"linux": {"system.disk.merged", "system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"},
"darwin": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"},
"freebsd": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"},
"openbsd": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"},
@@ -63,3 +63,13 @@ var diskTimeDescriptor = func() pdata.MetricDescriptor {
descriptor.SetType(pdata.MetricTypeMonotonicInt64)
return descriptor
}()

var diskMergedDescriptor = func() pdata.MetricDescriptor {
descriptor := pdata.NewMetricDescriptor()
descriptor.InitEmpty()
descriptor.SetName("system.disk.merged")
descriptor.SetDescription("The number of disk reads merged into single physical disk access operations.")
descriptor.SetUnit("1")
descriptor.SetType(pdata.MetricTypeMonotonicInt64)
return descriptor
}()
@@ -28,11 +28,14 @@ import (
type scraper struct {
config *Config
startTime pdata.TimestampUnixNano

// for mocking gopsutil disk.IOCounters
ioCounters func(names ...string) (map[string]disk.IOCountersStat, error)
}

// newDiskScraper creates a Disk Scraper
func newDiskScraper(_ context.Context, cfg *Config) *scraper {
return &scraper{config: cfg}
return &scraper{config: cfg, ioCounters: disk.IOCounters}
}

// Initialize
@@ -55,19 +58,20 @@ func (s *scraper) Close(_ context.Context) error {
func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) {
metrics := pdata.NewMetricSlice()

ioCounters, err := disk.IOCounters()
ioCounters, err := s.ioCounters()
if err != nil {
return metrics, err
}

metrics.Resize(3)
initializeDiskIOMetric(metrics.At(0), ioCounters, s.startTime)
initializeDiskOpsMetric(metrics.At(1), ioCounters, s.startTime)
initializeDiskTimeMetric(metrics.At(2), ioCounters, s.startTime)
metrics.Resize(3 + systemSpecificMetricsLen)
initializeDiskIOMetric(metrics.At(0), s.startTime, ioCounters)
initializeDiskOpsMetric(metrics.At(1), s.startTime, ioCounters)
initializeDiskTimeMetric(metrics.At(2), s.startTime, ioCounters)
appendSystemSpecificMetrics(metrics, 3, s.startTime, ioCounters)
return metrics, nil
}

func initializeDiskIOMetric(metric pdata.Metric, ioCounters map[string]disk.IOCountersStat, startTime pdata.TimestampUnixNano) {
func initializeDiskIOMetric(metric pdata.Metric, startTime pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) {
diskIODescriptor.CopyTo(metric.MetricDescriptor())

idps := metric.Int64DataPoints()
@@ -81,7 +85,7 @@ func initializeDiskIOMetric(metric pdata.Metric, ioCounters map[string]disk.IOCo
}
}

func initializeDiskOpsMetric(metric pdata.Metric, ioCounters map[string]disk.IOCountersStat, startTime pdata.TimestampUnixNano) {
func initializeDiskOpsMetric(metric pdata.Metric, startTime pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) {
diskOpsDescriptor.CopyTo(metric.MetricDescriptor())

idps := metric.Int64DataPoints()
@@ -95,7 +99,7 @@ func initializeDiskOpsMetric(metric pdata.Metric, ioCounters map[string]disk.IOC
}
}

func initializeDiskTimeMetric(metric pdata.Metric, ioCounters map[string]disk.IOCountersStat, startTime pdata.TimestampUnixNano) {
func initializeDiskTimeMetric(metric pdata.Metric, startTime pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) {
diskTimeDescriptor.CopyTo(metric.MetricDescriptor())

idps := metric.Int64DataPoints()
@@ -0,0 +1,40 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package diskscraper

import (
"github.com/shirou/gopsutil/disk"

"go.opentelemetry.io/collector/consumer/pdata"
)

const systemSpecificMetricsLen = 1

func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) {
metric := metrics.At(startIdx)
diskMergedDescriptor.CopyTo(metric.MetricDescriptor())

idps := metric.Int64DataPoints()
idps.Resize(2 * len(ioCounters))

idx := 0
for device, ioCounter := range ioCounters {
initializeDataPoint(idps.At(idx+0), startTime, device, readDirectionLabelValue, int64(ioCounter.MergedReadCount))
initializeDataPoint(idps.At(idx+1), startTime, device, writeDirectionLabelValue, int64(ioCounter.MergedWriteCount))
idx += 2
}
}
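
For context, the merged read/write counters that back the new system.disk.merged metric come straight from gopsutil, which populates them from /proc/diskstats on Linux. A tiny illustrative program (not part of this commit) that prints them:

package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/disk"
)

func main() {
	// On Linux, gopsutil fills MergedReadCount/MergedWriteCount from
	// /proc/diskstats; these are the values the new metric reports per
	// device and direction.
	counters, err := disk.IOCounters()
	if err != nil {
		log.Fatal(err)
	}
	for device, c := range counters {
		fmt.Printf("%s: merged reads=%d, merged writes=%d\n", device, c.MergedReadCount, c.MergedWriteCount)
	}
}
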
@@ -0,0 +1,28 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !linux

package diskscraper

import (
"github.com/shirou/gopsutil/disk"

"go.opentelemetry.io/collector/consumer/pdata"
)

const systemSpecificMetricsLen = 0

func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) {
}
@@ -16,44 +16,70 @@ package diskscraper

import (
"context"
"errors"
"runtime"
"testing"

"github.com/shirou/gopsutil/disk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"go.opentelemetry.io/collector/consumer/pdata"
"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
)

type validationFn func(*testing.T, pdata.MetricSlice)

func TestScrapeMetrics(t *testing.T) {
createScraperAndValidateScrapedMetrics(t, &Config{}, func(t *testing.T, metrics pdata.MetricSlice) {
// expect 3 metrics
assert.Equal(t, 3, metrics.Len())

// for each disk metric, expect a read & write datapoint for at least one drive
assertDiskMetricMatchesDescriptorAndHasReadAndWriteDataPoints(t, metrics.At(0), diskIODescriptor)
assertDiskMetricMatchesDescriptorAndHasReadAndWriteDataPoints(t, metrics.At(1), diskOpsDescriptor)
assertDiskMetricMatchesDescriptorAndHasReadAndWriteDataPoints(t, metrics.At(2), diskTimeDescriptor)
})
type testCase struct {
name string
ioCountersFunc func(names ...string) (map[string]disk.IOCountersStat, error)
expectedErr string
}

testCases := []testCase{
{
name: "Standard",
},
{
name: "Error",
ioCountersFunc: func(names ...string) (map[string]disk.IOCountersStat, error) { return nil, errors.New("err1") },
expectedErr: "err1",
},
}

for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
scraper := newDiskScraper(context.Background(), &Config{})
if test.ioCountersFunc != nil {
scraper.ioCounters = test.ioCountersFunc
}

err := scraper.Initialize(context.Background())
require.NoError(t, err, "Failed to initialize disk scraper: %v", err)
defer func() { assert.NoError(t, scraper.Close(context.Background())) }()

metrics, err := scraper.ScrapeMetrics(context.Background())
if test.expectedErr != "" {
assert.EqualError(t, err, test.expectedErr)
return
}
require.NoError(t, err, "Failed to scrape metrics: %v", err)

assert.GreaterOrEqual(t, metrics.Len(), 3)

assertDiskMetricValid(t, metrics.At(0), diskIODescriptor)
assertDiskMetricValid(t, metrics.At(1), diskOpsDescriptor)
assertDiskMetricValid(t, metrics.At(2), diskTimeDescriptor)

if runtime.GOOS == "linux" {
assertDiskMetricValid(t, metrics.At(3), diskMergedDescriptor)
}
})
}
}

func assertDiskMetricMatchesDescriptorAndHasReadAndWriteDataPoints(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.MetricDescriptor) {
func assertDiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.MetricDescriptor) {
internal.AssertDescriptorEqual(t, expectedDescriptor, metric.MetricDescriptor())
assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), 2)
internal.AssertInt64MetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue)
internal.AssertInt64MetricLabelHasValue(t, metric, 1, directionLabelName, writeDirectionLabelValue)
}

func createScraperAndValidateScrapedMetrics(t *testing.T, config *Config, assertFn validationFn) {
scraper := newDiskScraper(context.Background(), config)
err := scraper.Initialize(context.Background())
require.NoError(t, err, "Failed to initialize disk scraper: %v", err)
defer func() { assert.NoError(t, scraper.Close(context.Background())) }()

metrics, err := scraper.ScrapeMetrics(context.Background())
require.NoError(t, err, "Failed to scrape metrics: %v", err)

assertFn(t, metrics)
}
