This repository has been archived by the owner on Jul 31, 2023. It is now read-only.

Removed Mean aggregation (#638)
Fixes: #637
Ramon Nogueira committed Mar 27, 2018
1 parent 12aa45c commit bae5a8b
Showing 14 changed files with 8 additions and 354 deletions.
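For readers updating code that used the removed aggregation, a minimal migration sketch follows (illustrative only, not part of this commit; it reuses the exported ocgrpc identifiers changed below, the view name is hypothetical, and it assumes the view.Subscribe API shown in internal/readme/stats.go):

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	// Before this commit the error-count view used Aggregation: view.Mean().
	// After #638 the view keeps its shape and swaps the aggregation;
	// Count is what plugin/ocgrpc/client_metrics.go switches to below.
	errView := &view.View{
		Name:        "example.com/grpc/client_error_count", // hypothetical name
		Description: "RPC Errors",
		TagKeys:     []tag.Key{ocgrpc.KeyStatus, ocgrpc.KeyMethod},
		Measure:     ocgrpc.ClientErrorCount,
		Aggregation: view.Count(), // was view.Mean(); Sum or Distribution also remain
	}
	if err := view.Subscribe(errView); err != nil {
		log.Fatal(err)
	}
}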
6 changes: 0 additions & 6 deletions exporter/prometheus/prometheus.go
@@ -233,20 +233,14 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row)
switch data := row.Data.(type) {
case *view.CountData:
return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(*data), tagValues(row.Tags)...)

case *view.DistributionData:
points := make(map[float64]uint64)
for i, b := range v.Aggregation.Buckets {
points[b] = uint64(data.CountPerBucket[i])
}
return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)

case *view.MeanData:
return prometheus.NewConstSummary(desc, uint64(data.Count), data.Sum(), make(map[float64]float64), tagValues(row.Tags)...)

case *view.SumData:
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...)

default:
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
}
15 changes: 0 additions & 15 deletions exporter/prometheus/prometheus_test.go
@@ -49,10 +49,6 @@ func newView(measureName string, agg *view.Aggregation) *view.View {
func TestOnlyCumulativeWindowSupported(t *testing.T) {
// See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
count1 := view.CountData(1)
mean1 := view.MeanData{
Mean: 4.5,
Count: 5,
}
tests := []struct {
vds *view.Data
want int
@@ -72,15 +68,6 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
},
want: 1,
},
2: {
vds: &view.Data{
View: newView("TestOnlyCumulativeWindowSupported/m3", view.Mean()),
Rows: []*view.Row{
{Data: &mean1},
},
},
want: 1,
},
}

for i, tt := range tests {
@@ -144,9 +131,7 @@ func TestCollectNonRacy(t *testing.T) {

for i := 0; i < 1e3; i++ {
count1 := view.CountData(1)
mean1 := &view.MeanData{Mean: 4.5, Count: 5}
vds := []*view.Data{
{View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.Mean()), Rows: []*view.Row{{Data: mean1}}},
{View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: &count1}}},
}
for _, v := range vds {
16 changes: 0 additions & 16 deletions exporter/stackdriver/stats.go
@@ -317,22 +317,6 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: float64(*v),
}}
case *view.MeanData:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: int64(v.Count),
Mean: v.Mean,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, int64(v.Count)},
},
}}
case *view.DistributionData:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
121 changes: 0 additions & 121 deletions exporter/stackdriver/stats_test.go
@@ -26,7 +26,6 @@ import (
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/api/option"
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
"google.golang.org/genproto/googleapis/api/label"
"google.golang.org/genproto/googleapis/api/metric"
metricpb "google.golang.org/genproto/googleapis/api/metric"
@@ -101,14 +100,6 @@ func TestExporter_makeReq(t *testing.T) {
count2 := view.CountData(16)
sum1 := view.SumData(5.5)
sum2 := view.SumData(-11.1)
mean1 := view.MeanData{
Mean: 3.3,
Count: 7,
}
mean2 := view.MeanData{
Mean: -7.7,
Count: 5,
}
taskValue := getTaskValue()

tests := []struct {
@@ -253,98 +244,6 @@ },
},
}},
},
{
name: "mean agg + timeline",
projID: "proj-id",
vd: newTestViewData(v, start, end, &mean1, &mean2),
want: []*monitoringpb.CreateTimeSeriesRequest{{
Name: monitoring.MetricProjectPath("proj-id"),
TimeSeries: []*monitoringpb.TimeSeries{
{
Metric: &metricpb.Metric{
Type: "custom.googleapis.com/opencensus/testview",
Labels: map[string]string{
"test_key": "test-value-1",
opencensusTaskKey: taskValue,
},
},
Resource: &monitoredrespb.MonitoredResource{
Type: "global",
},
Points: []*monitoringpb.Point{
{
Interval: &monitoringpb.TimeInterval{
StartTime: &timestamp.Timestamp{
Seconds: start.Unix(),
Nanos: int32(start.Nanosecond()),
},
EndTime: &timestamp.Timestamp{
Seconds: end.Unix(),
Nanos: int32(end.Nanosecond()),
},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: 7,
Mean: 3.3,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, 7},
},
}},
},
},
},
{
Metric: &metricpb.Metric{
Type: "custom.googleapis.com/opencensus/testview",
Labels: map[string]string{
"test_key": "test-value-2",
opencensusTaskKey: taskValue,
},
},
Resource: &monitoredrespb.MonitoredResource{
Type: "global",
},
Points: []*monitoringpb.Point{
{
Interval: &monitoringpb.TimeInterval{
StartTime: &timestamp.Timestamp{
Seconds: start.Unix(),
Nanos: int32(start.Nanosecond()),
},
EndTime: &timestamp.Timestamp{
Seconds: end.Unix(),
Nanos: int32(end.Nanosecond()),
},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: 5,
Mean: -7.7,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, 5},
},
}},
},
},
},
},
}},
},
{
name: "dist agg + time window",
projID: "proj-id",
@@ -479,16 +378,6 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
agg: view.Sum(),
wantErr: false,
},
{
name: "mean agg",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DISTRIBUTION,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
agg: view.Mean(),
wantErr: false,
},
{
name: "distribution agg - mismatch",
md: &metricpb.MetricDescriptor{
@@ -499,16 +388,6 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
agg: view.Count(),
wantErr: true,
},
{
name: "mean agg - mismatch",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DOUBLE,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
agg: view.Mean(),
wantErr: true,
},
{
name: "distribution agg with keys",
md: &metricpb.MetricDescriptor{
3 changes: 1 addition & 2 deletions internal/readme/stats.go
@@ -38,10 +38,9 @@ func statsExamples() {
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
countAgg := view.Count()
sumAgg := view.Sum()
meanAgg := view.Mean()
// END aggs

_, _, _, _ = distAgg, countAgg, sumAgg, meanAgg
_, _, _ = distAgg, countAgg, sumAgg

// START view
if err = view.Subscribe(&view.View{
2 changes: 1 addition & 1 deletion plugin/ocgrpc/client_metrics.go
@@ -43,7 +43,7 @@ var (
Description: "RPC Errors",
TagKeys: []tag.Key{KeyStatus, KeyMethod},
Measure: ClientErrorCount,
Aggregation: view.Mean(),
Aggregation: view.Count(),
}

ClientRoundTripLatencyView = &view.View{
2 changes: 1 addition & 1 deletion plugin/ocgrpc/client_metrics_test.go
@@ -40,7 +40,7 @@ func TestViewsAggregationsConform(t *testing.T) {
}
}

assertTypeOf(ClientErrorCountView, view.Mean())
assertTypeOf(ClientErrorCountView, view.Sum())
assertTypeOf(ClientRoundTripLatencyView, view.Distribution())
assertTypeOf(ClientRequestBytesView, view.Distribution())
assertTypeOf(ClientResponseBytesView, view.Distribution())
6 changes: 3 additions & 3 deletions plugin/ocgrpc/client_stats_handler_test.go
@@ -158,7 +158,7 @@ func TestClientDefaultCollections(t *testing.T) {
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
Data: newCountData(1),
},
},
},
@@ -238,14 +238,14 @@ func TestClientDefaultCollections(t *testing.T) {
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
Data: newCountData(1),
},
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Aborted"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
Data: newCountData(1),
},
},
},
5 changes: 0 additions & 5 deletions plugin/ocgrpc/server_stats_handler_test.go
@@ -358,11 +358,6 @@ func newCountData(v int) *view.CountData {
return &cav
}

func newMeanData(count int64, mean float64) *view.MeanData {
mav := view.MeanData{Count: count, Mean: mean}
return &mav
}

func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData {
return &view.DistributionData{
Count: count,
14 changes: 0 additions & 14 deletions stats/view/aggregation.go
@@ -50,12 +50,6 @@ var (
return newSumData(0)
},
}
aggMean = &Aggregation{
Type: AggTypeMean,
newData: func() AggregationData {
return newMeanData(0, 0)
},
}
)

// Count indicates that data collected and aggregated
@@ -74,14 +68,6 @@ func Sum() *Aggregation {
return aggSum
}

// Mean indicates that collect and aggregate data and maintain
// the mean value.
// For example, average latency in milliseconds can be aggregated by using
// Mean, although in most cases it is preferable to use a Distribution.
func Mean() *Aggregation {
return aggMean
}

// Distribution indicates that the desired aggregation is
// a histogram distribution.
//
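The removed doc comment above recommends Distribution over Mean for values such as latency. As an illustration (not repository code), a running mean stays recoverable from a Distribution row's aggregated data, whose Count and Sum() appear in the exporter hunks earlier in this diff:

package example

import "go.opencensus.io/stats/view"

// meanFromDistribution is an illustrative helper, not part of this commit:
// with Mean removed, the mean of the recorded samples can still be derived
// from DistributionData (equivalently, its Mean field, as used by the
// newDistributionData test helper above).
func meanFromDistribution(d *view.DistributionData) float64 {
	if d.Count == 0 {
		return 0
	}
	return d.Sum() / float64(d.Count)
}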
42 changes: 0 additions & 42 deletions stats/view/aggregation_data.go
@@ -90,48 +90,6 @@ func (a *SumData) equal(other AggregationData) bool {
return math.Pow(float64(*a)-float64(*a2), 2) < epsilon
}

// MeanData is the aggregated data for the Mean aggregation.
// A mean aggregation processes data and maintains the mean value.
//
// Most users won't directly access mean data.
type MeanData struct {
Count int64 // number of data points aggregated
Mean float64 // mean of all data points
}

func newMeanData(mean float64, count int64) *MeanData {
return &MeanData{
Mean: mean,
Count: count,
}
}

// Sum returns the sum of all samples collected.
func (a *MeanData) Sum() float64 { return a.Mean * float64(a.Count) }

func (a *MeanData) isAggregationData() bool { return true }

func (a *MeanData) addSample(f float64) {
a.Count++
if a.Count == 1 {
a.Mean = f
return
}
a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
}

func (a *MeanData) clone() AggregationData {
return newMeanData(a.Mean, a.Count)
}

func (a *MeanData) equal(other AggregationData) bool {
a2, ok := other.(*MeanData)
if !ok {
return false
}
return a.Count == a2.Count && math.Pow(a.Mean-a2.Mean, 2) < epsilon
}

// DistributionData is the aggregated data for the
// Distribution aggregation.
//
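For reference, the removed MeanData.addSample above maintained the mean incrementally. A standalone sketch (not repository code) showing that the update mean += (x - mean)/n agrees with sum/count:

package main

import "fmt"

func main() {
	samples := []float64{4, 5, 6, 3}
	var mean, sum float64
	for i, x := range samples {
		mean += (x - mean) / float64(i+1) // the removed MeanData.addSample form
		sum += x
	}
	// Both values print 4.5 for this input.
	fmt.Println("incremental mean:", mean, "sum/count:", sum/float64(len(samples)))
}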
4 changes: 0 additions & 4 deletions stats/view/aggregation_data_test.go
@@ -41,10 +41,6 @@ func TestDataClone(t *testing.T) {
name: "distribution data",
src: dist,
},
{
name: "mean data",
src: newMeanData(11.0, 5),
},
{
name: "sum data",
src: newSumData(65.7),
