Remove metric MinMaxSumCount kind aggregation (#2423)
* Remove the metric MinMaxSumCount kind aggregation

* Remove related files

* Fix unit tests

* Fix the changelog

* Remove references to the removed aggregation type

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
hanyuancheung and MrAlias authored Dec 6, 2021
1 parent ee60bcc commit a4f183b
Showing 17 changed files with 8 additions and 744 deletions.
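
Migration note: code that previously type-asserted the removed `aggregation.MinMaxSumCount` interface can usually fall back to the surviving `aggregation.Histogram` interface, which still exposes a count and a sum; min and max have no direct replacement after this commit. The sketch below is illustrative only — the helper name `exposeSumAndCount` and its package are made up, while the aggregation interfaces and import path are the ones shown in the diff further down:

```go
package sketch

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// exposeSumAndCount is a hypothetical helper: it reads the values an
// exporter previously took from MinMaxSumCount, using the surviving
// Histogram aggregation instead. Min and Max have no replacement here.
func exposeSumAndCount(agg aggregation.Aggregation) (count uint64, sum interface{}, err error) {
	h, ok := agg.(aggregation.Histogram)
	if !ok {
		return 0, nil, fmt.Errorf("unsupported aggregation kind: %s", agg.Kind())
	}
	if count, err = h.Count(); err != nil {
		return 0, nil, err
	}
	if sum, err = h.Sum(); err != nil {
		return 0, nil, err
	}
	return count, sum, nil
}
```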
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -31,6 +31,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm

- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
- Remove the metric Bound Instruments interface and implementations. (#2399)
- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)

## [1.2.0] - 2021-11-12

2 changes: 0 additions & 2 deletions exporters/otlp/otlpmetric/go.mod
@@ -42,8 +42,6 @@ replace go.opentelemetry.io/otel/example/otel-collector => ../../../example/otel

replace go.opentelemetry.io/otel/example/passthrough => ../../../example/passthrough

replace go.opentelemetry.io/otel/example/prom-collector => ../../../example/prom-collector

replace go.opentelemetry.io/otel/example/prometheus => ../../../example/prometheus

replace go.opentelemetry.io/otel/example/zipkin => ../../../example/zipkin
65 changes: 0 additions & 65 deletions exporters/otlp/otlpmetric/internal/metrictransform/metric.go
@@ -239,13 +239,6 @@ func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) {
func Record(temporalitySelector aggregation.TemporalitySelector, r export.Record) (*metricpb.Metric, error) {
agg := r.Aggregation()
switch agg.Kind() {
case aggregation.MinMaxSumCountKind:
mmsc, ok := agg.(aggregation.MinMaxSumCount)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return minMaxSumCount(r, mmsc)

case aggregation.HistogramKind:
h, ok := agg.(aggregation.Histogram)
if !ok {
@@ -390,64 +383,6 @@ func sumPoint(record export.Record, num number.Number, start, end time.Time, tem
return m, nil
}

// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum number.Number, count uint64, err error) {
if min, err = a.Min(); err != nil {
return
}
if max, err = a.Max(); err != nil {
return
}
if sum, err = a.Sum(); err != nil {
return
}
if count, err = a.Count(); err != nil {
return
}
return
}

// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
min, max, sum, count, err := minMaxSumCountValues(a)
if err != nil {
return nil, err
}

m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
Data: &metricpb.Metric_Summary{
Summary: &metricpb.Summary{
DataPoints: []*metricpb.SummaryDataPoint{
{
Sum: sum.CoerceToFloat64(desc.NumberKind()),
Attributes: Iterator(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
QuantileValues: []*metricpb.SummaryDataPoint_ValueAtQuantile{
{
Quantile: 0.0,
Value: min.CoerceToFloat64(desc.NumberKind()),
},
{
Quantile: 1.0,
Value: max.CoerceToFloat64(desc.NumberKind()),
},
},
},
},
},
},
}
return m, nil
}

func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
var buckets aggregation.Buckets
if buckets, err = a.Histogram(); err != nil {
108 changes: 0 additions & 108 deletions exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go
@@ -31,7 +31,6 @@ import (
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
@@ -96,84 +95,6 @@ func TestStringKeyValues(t *testing.T) {
}
}

func TestMinMaxSumCountValue(t *testing.T) {
mmscs := minmaxsumcount.New(2, &sdkapi.Descriptor{})
mmsc, ckpt := &mmscs[0], &mmscs[1]

assert.NoError(t, mmsc.Update(context.Background(), 1, &sdkapi.Descriptor{}))
assert.NoError(t, mmsc.Update(context.Background(), 10, &sdkapi.Descriptor{}))

// Prior to checkpointing ErrNoData should be returned.
_, _, _, _, err := minMaxSumCountValues(ckpt)
assert.EqualError(t, err, aggregation.ErrNoData.Error())

// Checkpoint to set non-zero values
require.NoError(t, mmsc.SynchronizedMove(ckpt, &sdkapi.Descriptor{}))
min, max, sum, count, err := minMaxSumCountValues(ckpt)
if assert.NoError(t, err) {
assert.Equal(t, min, number.NewInt64Number(1))
assert.Equal(t, max, number.NewInt64Number(10))
assert.Equal(t, sum, number.NewInt64Number(11))
assert.Equal(t, count, uint64(2))
}
}

func TestMinMaxSumCountDatapoints(t *testing.T) {
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
mmscs := minmaxsumcount.New(2, &sdkapi.Descriptor{})
mmsc, ckpt := &mmscs[0], &mmscs[1]

assert.NoError(t, mmsc.Update(context.Background(), 1, &desc))
assert.NoError(t, mmsc.Update(context.Background(), 10, &desc))
require.NoError(t, mmsc.SynchronizedMove(ckpt, &desc))
expected := []*metricpb.SummaryDataPoint{
{
Count: 2,
Sum: 11,
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
TimeUnixNano: uint64(intervalEnd.UnixNano()),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
QuantileValues: []*metricpb.SummaryDataPoint_ValueAtQuantile{
{
Quantile: 0.0,
Value: 1.0,
},
{
Quantile: 1.0,
Value: 10.0,
},
},
},
}
record := export.NewRecord(&desc, &labels, ckpt.Aggregation(), intervalStart, intervalEnd)
m, err := minMaxSumCount(record, ckpt)
if assert.NoError(t, err) {
assert.Nil(t, m.GetGauge())
assert.Nil(t, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Equal(t, expected, m.GetSummary().DataPoints)
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}

func TestMinMaxSumCountPropagatesErrors(t *testing.T) {
// ErrNoData should be returned by both the Min and Max values of
// a MinMaxSumCount Aggregator. Use this fact to check the error is
// correctly returned.
mmsc := &minmaxsumcount.New(1, &sdkapi.Descriptor{})[0]
_, _, _, _, err := minMaxSumCountValues(mmsc)
assert.Error(t, err)
assert.Equal(t, aggregation.ErrNoData, err)
}

func TestSumIntDataPoints(t *testing.T) {
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
@@ -335,10 +256,6 @@ type testErrLastValue struct {
err error
}

type testErrMinMaxSumCount struct {
testErrSum
}

func (te *testErrLastValue) LastValue() (number.Number, time.Time, error) {
return 0, time.Time{}, te.err
}
@@ -353,23 +270,10 @@ func (te *testErrSum) Kind() aggregation.Kind {
return aggregation.SumKind
}

func (te *testErrMinMaxSumCount) Min() (number.Number, error) {
return 0, te.err
}

func (te *testErrMinMaxSumCount) Max() (number.Number, error) {
return 0, te.err
}

func (te *testErrMinMaxSumCount) Count() (uint64, error) {
return 0, te.err
}

var _ export.Aggregator = &testAgg{}
var _ aggregation.Aggregation = &testAgg{}
var _ aggregation.Sum = &testErrSum{}
var _ aggregation.LastValue = &testErrLastValue{}
var _ aggregation.MinMaxSumCount = &testErrMinMaxSumCount{}

func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
Expand All @@ -393,12 +297,6 @@ func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))

mpb, err = makeMpb(aggregation.MinMaxSumCountKind, &lastvalue.New(1)[0])

require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))
}

func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
@@ -421,10 +319,4 @@ func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, errEx))

mpb, err = makeMpb(aggregation.MinMaxSumCountKind, &testErrMinMaxSumCount{testErrSum{errEx}})

require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, errEx))
}
2 changes: 0 additions & 2 deletions exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go
@@ -54,8 +54,6 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter
instruments := map[string]data{
"test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1},
"test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1},
"test-int64-histogram": {sdkapi.HistogramInstrumentKind, number.Int64Kind, 2},
"test-float64-histogram": {sdkapi.HistogramInstrumentKind, number.Float64Kind, 2},
"test-int64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, 3},
"test-float64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, 3},
}
22 changes: 0 additions & 22 deletions exporters/stdout/stdoutmetric/metric.go
@@ -37,8 +37,6 @@ var _ exportmetric.Exporter = &metricExporter{}

type line struct {
Name string `json:"Name"`
Min interface{} `json:"Min,omitempty"`
Max interface{} `json:"Max,omitempty"`
Sum interface{} `json:"Sum,omitempty"`
Count interface{} `json:"Count,omitempty"`
LastValue interface{} `json:"Last,omitempty"`
@@ -83,26 +81,6 @@ func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reade
return err
}
expose.Sum = value.AsInterface(kind)
}

if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
count, err := mmsc.Count()
if err != nil {
return err
}
expose.Count = count

max, err := mmsc.Max()
if err != nil {
return err
}
expose.Max = max.AsInterface(kind)

min, err := mmsc.Min()
if err != nil {
return err
}
expose.Min = min.AsInterface(kind)
} else if lv, ok := agg.(aggregation.LastValue); ok {
value, timestamp, err := lv.LastValue()
if err != nil {
13 changes: 0 additions & 13 deletions exporters/stdout/stdoutmetric/metric_test.go
@@ -156,18 +156,6 @@ func TestStdoutLastValueFormat(t *testing.T) {
require.Equal(t, `[{"Name":"name.lastvalue{R=V,instrumentation.name=test,A=B,C=D}","Last":123.456}]`, fix.Output())
}

func TestStdoutMinMaxSumCount(t *testing.T) {
fix := newFixture(t)

counter := metric.Must(fix.meter).NewFloat64Counter("name.minmaxsumcount")
counter.Add(fix.ctx, 123.456, attribute.String("A", "B"), attribute.String("C", "D"))
counter.Add(fix.ctx, 876.543, attribute.String("A", "B"), attribute.String("C", "D"))

require.NoError(t, fix.cont.Stop(fix.ctx))

require.Equal(t, `[{"Name":"name.minmaxsumcount{R=V,instrumentation.name=test,A=B,C=D}","Min":123.456,"Max":876.543,"Sum":999.999,"Count":2}]`, fix.Output())
}

func TestStdoutHistogramFormat(t *testing.T) {
fix := newFixture(t, stdoutmetric.WithPrettyPrint())

Expand Down Expand Up @@ -201,7 +189,6 @@ func TestStdoutNoData(t *testing.T) {
}

runTwoAggs("lastvalue")
runTwoAggs("minmaxsumcount")
}

func TestStdoutResource(t *testing.T) {
19 changes: 4 additions & 15 deletions sdk/export/metric/aggregation/aggregation.go
@@ -84,15 +84,6 @@ type (
Sum() (number.Number, error)
Histogram() (Buckets, error)
}

// MinMaxSumCount supports the Min, Max, Sum, and Count interfaces.
MinMaxSumCount interface {
Aggregation
Min() (number.Number, error)
Max() (number.Number, error)
Sum() (number.Number, error)
Count() (uint64, error)
}
)

type (
Expand All @@ -106,18 +97,16 @@ type (
// deciding how to expose metric data. This enables
// user-supplied Aggregators to replace builtin Aggregators.
//
// For example, test for a Distribution before testing for a
// MinMaxSumCount, test for a Histogram before testing for a
// For example, test for a Histogram before testing for a
// Sum, and so on.
Kind string
)

// Kind description constants.
const (
SumKind Kind = "Sum"
MinMaxSumCountKind Kind = "MinMaxSumCount"
HistogramKind Kind = "Histogram"
LastValueKind Kind = "Lastvalue"
SumKind Kind = "Sum"
HistogramKind Kind = "Histogram"
LastValueKind Kind = "Lastvalue"
)

// Sentinel errors for Aggregation interface.
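
The Kind documentation above still advises testing for the most specific aggregation first when deciding how to expose metric data. A minimal sketch of that ordering with the interfaces that remain after this change — the helper `describeAggregation` and its return strings are illustrative, not part of the SDK:

```go
package sketch

import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"

// describeAggregation tests the most specific aggregation interface
// first, as the Kind docs advise, now that MinMaxSumCount is gone.
// A Histogram also exposes Sum, so it must be checked before Sum.
func describeAggregation(agg aggregation.Aggregation) string {
	if _, ok := agg.(aggregation.Histogram); ok {
		return "histogram (buckets, sum, and count)"
	}
	if _, ok := agg.(aggregation.Sum); ok {
		return "sum"
	}
	if _, ok := agg.(aggregation.LastValue); ok {
		return "last value"
	}
	return "unrecognized kind: " + string(agg.Kind())
}
```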
3 changes: 1 addition & 2 deletions sdk/export/metric/metric.go
@@ -139,8 +139,7 @@ type CheckpointerFactory interface {
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a Histogram instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
// to attach a Sum aggregator to a Histogram instrument.
type Aggregator interface {
// Aggregation returns an Aggregation interface to access the
// current state of this Aggregator. The caller is
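
The Aggregator documentation above keeps the point that, because of the API/SDK separation, any Aggregator may back any instrument. A small, hedged sketch of that pairing, modeled on the transform tests removed in this commit; the import paths for the number, sdkapi, and metrictest packages are assumed from the module layout at the time and are not shown in this diff:

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
	"go.opentelemetry.io/otel/sdk/metric/metrictest"
)

// sumForHistogramInstrument attaches a Sum aggregator to a
// Histogram-kind instrument descriptor, mirroring the pattern used by
// the transform tests in this diff.
func sumForHistogramInstrument(ctx context.Context) error {
	desc := metrictest.NewDescriptor("example", sdkapi.HistogramInstrumentKind, number.Int64Kind)
	aggs := sum.New(2)
	agg, ckpt := &aggs[0], &aggs[1]

	if err := agg.Update(ctx, number.NewInt64Number(3), &desc); err != nil {
		return err
	}
	// Checkpoint the accumulated state into ckpt for export.
	return agg.SynchronizedMove(ckpt, &desc)
}
```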