From bae5a8b3e365b430471b696abf0536dc2f1c60b1 Mon Sep 17 00:00:00 2001
From: Ramon Nogueira
Date: Tue, 27 Mar 2018 09:08:52 -0700
Subject: [PATCH] Removed Mean aggregation (#638)

Fixes: #637
---
 exporter/prometheus/prometheus.go          |   6 -
 exporter/prometheus/prometheus_test.go     |  15 ---
 exporter/stackdriver/stats.go              |  16 ---
 exporter/stackdriver/stats_test.go         | 121 --------------------
 internal/readme/stats.go                   |   3 +-
 plugin/ocgrpc/client_metrics.go            |   2 +-
 plugin/ocgrpc/client_metrics_test.go       |   2 +-
 plugin/ocgrpc/client_stats_handler_test.go |   6 +-
 plugin/ocgrpc/server_stats_handler_test.go |   5 -
 stats/view/aggregation.go                  |  14 ---
 stats/view/aggregation_data.go             |  42 -------
 stats/view/aggregation_data_test.go        |   4 -
 stats/view/view_test.go                    | 123 +--------------------
 zpages/rpcz.go                             |   3 -
 14 files changed, 8 insertions(+), 354 deletions(-)

diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go
index 4f5183d66..70307491d 100644
--- a/exporter/prometheus/prometheus.go
+++ b/exporter/prometheus/prometheus.go
@@ -233,20 +233,14 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row)
   switch data := row.Data.(type) {
   case *view.CountData:
     return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(*data), tagValues(row.Tags)...)
-
   case *view.DistributionData:
     points := make(map[float64]uint64)
     for i, b := range v.Aggregation.Buckets {
       points[b] = uint64(data.CountPerBucket[i])
     }
     return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
-
-  case *view.MeanData:
-    return prometheus.NewConstSummary(desc, uint64(data.Count), data.Sum(), make(map[float64]float64), tagValues(row.Tags)...)
-
   case *view.SumData:
     return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...)
-
   default:
     return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
   }
diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go
index 456751fbd..5ca5abcc6 100644
--- a/exporter/prometheus/prometheus_test.go
+++ b/exporter/prometheus/prometheus_test.go
@@ -49,10 +49,6 @@ func newView(measureName string, agg *view.Aggregation) *view.View {
 func TestOnlyCumulativeWindowSupported(t *testing.T) {
   // See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
   count1 := view.CountData(1)
-  mean1 := view.MeanData{
-    Mean:  4.5,
-    Count: 5,
-  }
   tests := []struct {
     vds  *view.Data
     want int
@@ -72,15 +68,6 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
       },
       want: 1,
     },
-    2: {
-      vds: &view.Data{
-        View: newView("TestOnlyCumulativeWindowSupported/m3", view.Mean()),
-        Rows: []*view.Row{
-          {Data: &mean1},
-        },
-      },
-      want: 1,
-    },
   }

   for i, tt := range tests {
@@ -144,9 +131,7 @@ func TestCollectNonRacy(t *testing.T) {

   for i := 0; i < 1e3; i++ {
     count1 := view.CountData(1)
-    mean1 := &view.MeanData{Mean: 4.5, Count: 5}
     vds := []*view.Data{
-      {View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.Mean()), Rows: []*view.Row{{Data: mean1}}},
       {View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: &count1}}},
     }
     for _, v := range vds {
diff --git a/exporter/stackdriver/stats.go b/exporter/stackdriver/stats.go
index dbd1cba0c..69b2e147e 100644
--- a/exporter/stackdriver/stats.go
+++ b/exporter/stackdriver/stats.go
@@ -317,22 +317,6 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
     return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
       DoubleValue: float64(*v),
     }}
-  case *view.MeanData:
-    return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
-      DistributionValue: &distributionpb.Distribution{
-        Count:                 int64(v.Count),
-        Mean:                  v.Mean,
-        SumOfSquaredDeviation: 0,
-        BucketOptions: &distributionpb.Distribution_BucketOptions{
-          Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
-            ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
-              Bounds: []float64{0},
-            },
-          },
-        },
-        BucketCounts: []int64{0, int64(v.Count)},
-      },
-    }}
   case *view.DistributionData:
     return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
       DistributionValue: &distributionpb.Distribution{
diff --git a/exporter/stackdriver/stats_test.go b/exporter/stackdriver/stats_test.go
index 8809845a8..b47e47619 100644
--- a/exporter/stackdriver/stats_test.go
+++ b/exporter/stackdriver/stats_test.go
@@ -26,7 +26,6 @@ import (
   "go.opencensus.io/stats/view"
   "go.opencensus.io/tag"
   "google.golang.org/api/option"
-  distributionpb "google.golang.org/genproto/googleapis/api/distribution"
   "google.golang.org/genproto/googleapis/api/label"
   "google.golang.org/genproto/googleapis/api/metric"
   metricpb "google.golang.org/genproto/googleapis/api/metric"
@@ -101,14 +100,6 @@ func TestExporter_makeReq(t *testing.T) {
   count2 := view.CountData(16)
   sum1 := view.SumData(5.5)
   sum2 := view.SumData(-11.1)
-  mean1 := view.MeanData{
-    Mean:  3.3,
-    Count: 7,
-  }
-  mean2 := view.MeanData{
-    Mean:  -7.7,
-    Count: 5,
-  }
   taskValue := getTaskValue()

   tests := []struct {
@@ -253,98 +244,6 @@ func TestExporter_makeReq(t *testing.T) {
         },
       }},
     },
-    {
-      name:   "mean agg + timeline",
-      projID: "proj-id",
-      vd:     newTestViewData(v, start, end, &mean1, &mean2),
-      want: []*monitoringpb.CreateTimeSeriesRequest{{
-        Name: monitoring.MetricProjectPath("proj-id"),
-        TimeSeries: []*monitoringpb.TimeSeries{
-          {
-            Metric: &metricpb.Metric{
-              Type: "custom.googleapis.com/opencensus/testview",
-              Labels: map[string]string{
-                "test_key":        "test-value-1",
-                opencensusTaskKey: taskValue,
-              },
-            },
-            Resource: &monitoredrespb.MonitoredResource{
-              Type: "global",
-            },
-            Points: []*monitoringpb.Point{
-              {
-                Interval: &monitoringpb.TimeInterval{
-                  StartTime: &timestamp.Timestamp{
-                    Seconds: start.Unix(),
-                    Nanos:   int32(start.Nanosecond()),
-                  },
-                  EndTime: &timestamp.Timestamp{
-                    Seconds: end.Unix(),
-                    Nanos:   int32(end.Nanosecond()),
-                  },
-                },
-                Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
-                  DistributionValue: &distributionpb.Distribution{
-                    Count:                 7,
-                    Mean:                  3.3,
-                    SumOfSquaredDeviation: 0,
-                    BucketOptions: &distributionpb.Distribution_BucketOptions{
-                      Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
-                        ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
-                          Bounds: []float64{0},
-                        },
-                      },
-                    },
-                    BucketCounts: []int64{0, 7},
-                  },
-                }},
-              },
-            },
-          },
-          {
-            Metric: &metricpb.Metric{
-              Type: "custom.googleapis.com/opencensus/testview",
-              Labels: map[string]string{
-                "test_key":        "test-value-2",
-                opencensusTaskKey: taskValue,
-              },
-            },
-            Resource: &monitoredrespb.MonitoredResource{
-              Type: "global",
-            },
-            Points: []*monitoringpb.Point{
-              {
-                Interval: &monitoringpb.TimeInterval{
-                  StartTime: &timestamp.Timestamp{
-                    Seconds: start.Unix(),
-                    Nanos:   int32(start.Nanosecond()),
-                  },
-                  EndTime: &timestamp.Timestamp{
-                    Seconds: end.Unix(),
-                    Nanos:   int32(end.Nanosecond()),
-                  },
-                },
-                Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
-                  DistributionValue: &distributionpb.Distribution{
-                    Count:                 5,
-                    Mean:                  -7.7,
-                    SumOfSquaredDeviation: 0,
-                    BucketOptions: &distributionpb.Distribution_BucketOptions{
-                      Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
-                        ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
-                          Bounds: []float64{0},
-                        },
-                      },
-                    },
-                    BucketCounts: []int64{0, 5},
-                  },
-                }},
-              },
-            },
-          },
-        },
-      }},
-    },
     {
       name:   "dist agg + time window",
       projID: "proj-id",
@@ -479,16 +378,6 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
       agg:     view.Sum(),
       wantErr: false,
     },
-    {
-      name: "mean agg",
-      md: &metricpb.MetricDescriptor{
-        MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
-        ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
-        Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
-      },
-      agg:     view.Mean(),
-      wantErr: false,
-    },
     {
       name: "distribution agg - mismatch",
       md: &metricpb.MetricDescriptor{
@@ -499,16 +388,6 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
       agg:     view.Count(),
       wantErr: true,
     },
-    {
-      name: "mean agg - mismatch",
-      md: &metricpb.MetricDescriptor{
-        MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
-        ValueType:  metricpb.MetricDescriptor_DOUBLE,
-        Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
-      },
-      agg:     view.Mean(),
-      wantErr: true,
-    },
     {
       name: "distribution agg with keys",
       md: &metricpb.MetricDescriptor{
diff --git a/internal/readme/stats.go b/internal/readme/stats.go
index f94e30483..f1c11d9a0 100644
--- a/internal/readme/stats.go
+++ b/internal/readme/stats.go
@@ -38,10 +38,9 @@ func statsExamples() {
   distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
   countAgg := view.Count()
   sumAgg := view.Sum()
-  meanAgg := view.Mean()
   // END aggs

-  _, _, _, _ = distAgg, countAgg, sumAgg, meanAgg
+  _, _, _ = distAgg, countAgg, sumAgg

   // START view
   if err = view.Subscribe(&view.View{
diff --git a/plugin/ocgrpc/client_metrics.go b/plugin/ocgrpc/client_metrics.go
index e9a4d2bdf..3d48ffff0 100644
--- a/plugin/ocgrpc/client_metrics.go
+++ b/plugin/ocgrpc/client_metrics.go
@@ -43,7 +43,7 @@ var (
     Description: "RPC Errors",
     TagKeys:     []tag.Key{KeyStatus, KeyMethod},
     Measure:     ClientErrorCount,
-    Aggregation: view.Mean(),
+    Aggregation: view.Count(),
   }

   ClientRoundTripLatencyView = &view.View{
diff --git a/plugin/ocgrpc/client_metrics_test.go b/plugin/ocgrpc/client_metrics_test.go
index 5a0489eae..61bea17f0 100644
--- a/plugin/ocgrpc/client_metrics_test.go
+++ b/plugin/ocgrpc/client_metrics_test.go
@@ -40,7 +40,7 @@ func TestViewsAggregationsConform(t *testing.T) {
     }
   }

-  assertTypeOf(ClientErrorCountView, view.Mean())
+  assertTypeOf(ClientErrorCountView, view.Count())
   assertTypeOf(ClientRoundTripLatencyView, view.Distribution())
   assertTypeOf(ClientRequestBytesView, view.Distribution())
   assertTypeOf(ClientResponseBytesView, view.Distribution())
diff --git a/plugin/ocgrpc/client_stats_handler_test.go b/plugin/ocgrpc/client_stats_handler_test.go
index 90da4e5b1..a8c4ba5b0 100644
--- a/plugin/ocgrpc/client_stats_handler_test.go
+++ b/plugin/ocgrpc/client_stats_handler_test.go
@@ -158,7 +158,7 @@ func TestClientDefaultCollections(t *testing.T) {
             {Key: KeyStatus, Value: "Canceled"},
             {Key: KeyMethod, Value: "package.service/method"},
           },
-          Data: newMeanData(1, 1),
+          Data: newCountData(1),
         },
       },
     },
@@ -238,14 +238,14 @@ func TestClientDefaultCollections(t *testing.T) {
             {Key: KeyStatus, Value: "Canceled"},
             {Key: KeyMethod, Value: "package.service/method"},
           },
-          Data: newMeanData(1, 1),
+          Data: newCountData(1),
         },
         {
           Tags: []tag.Tag{
             {Key: KeyStatus, Value: "Aborted"},
             {Key: KeyMethod, Value: "package.service/method"},
           },
-          Data: newMeanData(1, 1),
+          Data: newCountData(1),
         },
       },
     },
diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go
index 85a315eec..d5d0c2097 100644
--- a/plugin/ocgrpc/server_stats_handler_test.go
+++ b/plugin/ocgrpc/server_stats_handler_test.go
@@ -358,11 +358,6 @@ func newCountData(v int) *view.CountData {
   return &cav
 }

-func newMeanData(count int64, mean float64) *view.MeanData {
-  mav := view.MeanData{Count: count, Mean: mean}
-  return &mav
-}
-
 func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData {
   return &view.DistributionData{
     Count: count,
diff --git a/stats/view/aggregation.go b/stats/view/aggregation.go
index 3ae6d02bd..c3272e52e 100644
--- a/stats/view/aggregation.go
+++ b/stats/view/aggregation.go
@@ -50,12 +50,6 @@ var (
       return newSumData(0)
     },
   }
-  aggMean = &Aggregation{
-    Type: AggTypeMean,
-    newData: func() AggregationData {
-      return newMeanData(0, 0)
-    },
-  }
 )

 // Count indicates that data collected and aggregated
@@ -74,14 +68,6 @@ func Sum() *Aggregation {
   return aggSum
 }

-// Mean indicates that collect and aggregate data and maintain
-// the mean value.
-// For example, average latency in milliseconds can be aggregated by using
-// Mean, although in most cases it is preferable to use a Distribution.
-func Mean() *Aggregation {
-  return aggMean
-}
-
 // Distribution indicates that the desired aggregation is
 // a histogram distribution.
 //
diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go
index 8d74bc47f..09cc7c141 100644
--- a/stats/view/aggregation_data.go
+++ b/stats/view/aggregation_data.go
@@ -90,48 +90,6 @@ func (a *SumData) equal(other AggregationData) bool {
   return math.Pow(float64(*a)-float64(*a2), 2) < epsilon
 }

-// MeanData is the aggregated data for the Mean aggregation.
-// A mean aggregation processes data and maintains the mean value.
-//
-// Most users won't directly access mean data.
-type MeanData struct {
-  Count int64   // number of data points aggregated
-  Mean  float64 // mean of all data points
-}
-
-func newMeanData(mean float64, count int64) *MeanData {
-  return &MeanData{
-    Mean:  mean,
-    Count: count,
-  }
-}
-
-// Sum returns the sum of all samples collected.
-func (a *MeanData) Sum() float64 { return a.Mean * float64(a.Count) }
-
-func (a *MeanData) isAggregationData() bool { return true }
-
-func (a *MeanData) addSample(f float64) {
-  a.Count++
-  if a.Count == 1 {
-    a.Mean = f
-    return
-  }
-  a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
-}
-
-func (a *MeanData) clone() AggregationData {
-  return newMeanData(a.Mean, a.Count)
-}
-
-func (a *MeanData) equal(other AggregationData) bool {
-  a2, ok := other.(*MeanData)
-  if !ok {
-    return false
-  }
-  return a.Count == a2.Count && math.Pow(a.Mean-a2.Mean, 2) < epsilon
-}
-
 // DistributionData is the aggregated data for the
 // Distribution aggregation.
 //
diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go
index 6e9d0e57f..e4fcfd28d 100644
--- a/stats/view/aggregation_data_test.go
+++ b/stats/view/aggregation_data_test.go
@@ -41,10 +41,6 @@ func TestDataClone(t *testing.T) {
       name: "distribution data",
       src:  dist,
     },
-    {
-      name: "mean data",
-      src:  newMeanData(11.0, 5),
-    },
     {
       name: "sum data",
       src:  newSumData(65.7),
diff --git a/stats/view/view_test.go b/stats/view/view_test.go
index 390a338ac..2312e0d4c 100644
--- a/stats/view/view_test.go
+++ b/stats/view/view_test.go
@@ -313,7 +313,7 @@ func TestCanonicalize(t *testing.T) {
   k1, _ := tag.NewKey("k1")
   k2, _ := tag.NewKey("k2")
   m, _ := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitNone)
-  v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Mean()}
+  v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Sum()}
   err := v.canonicalize()
   if err != nil {
     t.Fatal(err)
@@ -332,125 +332,6 @@ func TestCanonicalize(t *testing.T) {
   }
 }

-func Test_View_MeasureFloat64_AggregationMean(t *testing.T) {
-  k1, _ := tag.NewKey("k1")
-  k2, _ := tag.NewKey("k2")
-  k3, _ := tag.NewKey("k3")
-  m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationMean/m1", "", stats.UnitNone)
-  viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Mean()}
-  view, err := newViewInternal(viewDesc)
-  if err != nil {
-    t.Fatal(err)
-  }
-
-  type tagString struct {
-    k tag.Key
-    v string
-  }
-  type record struct {
-    f    float64
-    tags []tagString
-  }
-
-  tcs := []struct {
-    label    string
-    records  []record
-    wantRows []*Row
-  }{
-    {
-      "1",
-      []record{
-        {1, []tagString{{k1, "v1"}}},
-        {5, []tagString{{k1, "v1"}}},
-      },
-      []*Row{
-        {
-          []tag.Tag{{Key: k1, Value: "v1"}},
-          newMeanData(3, 2),
-        },
-      },
-    },
-    {
-      "2",
-      []record{
-        {1, []tagString{{k1, "v1"}}},
-        {5, []tagString{{k2, "v2"}}},
-        {-0.5, []tagString{{k2, "v2"}}},
-      },
-      []*Row{
-        {
-          []tag.Tag{{Key: k1, Value: "v1"}},
-          newMeanData(1, 1),
-        },
-        {
-          []tag.Tag{{Key: k2, Value: "v2"}},
-          newMeanData(2.25, 2),
-        },
-      },
-    },
-    {
-      "3",
-      []record{
-        {1, []tagString{{k1, "v1"}}},
-        {5, []tagString{{k1, "v1"}, {k3, "v3"}}},
-        {1, []tagString{{k1, "v1 other"}}},
-        {5, []tagString{{k2, "v2"}}},
-        {5, []tagString{{k1, "v1"}, {k2, "v2"}}},
-        {-4, []tagString{{k1, "v1"}, {k2, "v2"}}},
-      },
-      []*Row{
-        {
-          []tag.Tag{{Key: k1, Value: "v1"}},
-          newMeanData(3, 2),
-        },
-        {
-          []tag.Tag{{Key: k1, Value: "v1 other"}},
-          newMeanData(1, 1),
-        },
-        {
-          []tag.Tag{{Key: k2, Value: "v2"}},
-          newMeanData(5, 1),
-        },
-        {
-          []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
-          newMeanData(0.5, 2),
-        },
-      },
-    },
-  }
-
-  for _, tt := range tcs {
-    view.clearRows()
-    view.subscribe()
-    for _, r := range tt.records {
-      mods := []tag.Mutator{}
-      for _, t := range r.tags {
-        mods = append(mods, tag.Insert(t.k, t.v))
-      }
-      ctx, err := tag.New(context.Background(), mods...)
-      if err != nil {
-        t.Errorf("%v: New = %v", tt.label, err)
-      }
-      view.addSample(tag.FromContext(ctx), r.f)
-    }
-
-    gotRows := view.collectedRows()
-    for i, got := range gotRows {
-      if !containsRow(tt.wantRows, got) {
-        t.Errorf("%v-%d: got row %v; want none", tt.label, i, got)
-        break
-      }
-    }
-
-    for i, want := range tt.wantRows {
-      if !containsRow(gotRows, want) {
-        t.Errorf("%v-%d: got none; want row %v", tt.label, i, want)
-        break
-      }
-    }
-  }
-}
-
 func TestViewSortedKeys(t *testing.T) {
   k1, _ := tag.NewKey("a")
   k2, _ := tag.NewKey("b")
@@ -463,7 +344,7 @@ func TestViewSortedKeys(t *testing.T) {
     Description: "desc sort_keys",
     TagKeys:     ks,
     Measure:     m,
-    Aggregation: Mean(),
+    Aggregation: Sum(),
   })
   // Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view
   v := Find("sort_keys")
diff --git a/zpages/rpcz.go b/zpages/rpcz.go
index 663b9b308..2753f2fe5 100644
--- a/zpages/rpcz.go
+++ b/zpages/rpcz.go
@@ -251,9 +251,6 @@ func (s snapExporter) ExportView(vd *view.Data) {
   case *view.DistributionData:
     sum = v.Sum()
     count = float64(v.Count)
-  case *view.MeanData:
-    sum = v.Sum()
-    count = float64(v.Count)
   case *view.SumData:
     sum = float64(*v)
     count = float64(*v)
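
Migration note (not part of the patch): downstream code that still constructs views with view.Mean() needs to pick another aggregation once this change lands. The sketch below is a minimal, hypothetical example -- the package, measure name, view name, and bucket bounds are invented for illustration -- using only APIs visible in the diff above: view.Distribution when the average still matters (the mean of a row can be recovered from DistributionData as Sum() divided by Count), or view.Count when only the number of samples is needed, which is what this patch does for ClientErrorCountView.

// Hypothetical migration sketch; names and bounds are illustrative only.
package main

import (
  "log"

  "go.opencensus.io/stats"
  "go.opencensus.io/stats/view"
)

func main() {
  // Hypothetical measure; before this patch it might have been exported
  // through a view with Aggregation: view.Mean().
  latency, err := stats.Float64("example.com/measure/latency", "request latency", stats.UnitNone)
  if err != nil {
    log.Fatal(err)
  }

  // Option 1: keep the average by recording a histogram. The mean of a
  // collected row is DistributionData.Sum() / float64(DistributionData.Count).
  err = view.Subscribe(&view.View{
    Name:        "example.com/views/latency",
    Description: "latency distribution (replaces a Mean view)",
    Measure:     latency,
    Aggregation: view.Distribution(0, 10, 100, 1000), // illustrative bounds
  })
  if err != nil {
    log.Fatal(err)
  }

  // Option 2: if only the number of samples matters, use view.Count(),
  // as this patch does for ClientErrorCountView in plugin/ocgrpc.
}

Distribution is the natural replacement when the mean is still wanted, since every exporter touched by this patch (Prometheus, Stackdriver, zpages) already handles DistributionData.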