diff --git a/aggregation/id.go b/aggregation/id.go
new file mode 100644
index 0000000..4eb8cb2
--- /dev/null
+++ b/aggregation/id.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2018 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package aggregation
+
+import (
+	"fmt"
+
+	"github.com/m3db/m3metrics/generated/proto/schema"
+)
+
+const (
+	// IDLen is the number of uint64 words used to encode an ID.
+	// IDLen is 1 as long as maxTypeID <= 63.
+	IDLen = maxTypeID/64 + 1
+
+	// ID packs aggregation types into an array of uint64 words;
+	// idBitShift and idBitMask locate the bit for a given type.
+	idBitShift = 6
+	idBitMask  = 63
+)
+
+// ID represents a compressed view of Types.
+type ID [IDLen]uint64
+
+// NewIDFromSchema creates an ID from schema.
+func NewIDFromSchema(input []schema.AggregationType) (ID, error) {
+	aggTypes, err := NewTypesFromSchema(input)
+	if err != nil {
+		return DefaultID, err
+	}
+
+	// TODO(cw): consider pooling these compressors;
+	// this allocates one extra slice of length one per call.
+	id, err := NewIDCompressor().Compress(aggTypes)
+	if err != nil {
+		return DefaultID, err
+	}
+	return id, nil
+}
+
+// MustCompressTypes compresses a list of aggregation types into
+// an ID, and panics if an error is encountered.
+func MustCompressTypes(aggTypes ...Type) ID {
+	res, err := NewIDCompressor().Compress(aggTypes)
+	if err != nil {
+		panic(err.Error())
+	}
+	return res
+}
+
+// IsDefault checks if the ID is the default aggregation ID.
+func (id ID) IsDefault() bool {
+	return id == DefaultID
+}
+
+// Contains checks if the given aggregation type is contained in the aggregation id.
+func (id ID) Contains(aggType Type) bool {
+	if !aggType.IsValid() {
+		return false
+	}
+	idx := int(aggType) >> idBitShift   // aggType / 64
+	offset := uint(aggType) & idBitMask // aggType % 64
+	return (id[idx] & (1 << offset)) > 0
+}
+
+// Types returns the aggregation types defined by the id.
+func (id ID) Types() (Types, error) {
+	return NewIDDecompressor().Decompress(id)
+}
+
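+// As an illustration of the bit layout: for an ID holding Min and Max (type
+// ids 2 and 3, see type.go), Contains(Min) computes idx 0 and offset 2 and
+// tests id[0] & (1 << 2) > 0.
+
+// String returns a string representation of the ID for debugging.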
+func (id ID) String() string { + aggTypes, err := id.Types() + if err != nil { + return fmt.Sprintf("[invalid ID: %v]", err) + } + return aggTypes.String() +} diff --git a/policy/aggregation_id_compress.go b/aggregation/id_compress.go similarity index 54% rename from policy/aggregation_id_compress.go rename to aggregation/id_compress.go index b8d8b57..0ed3d03 100644 --- a/policy/aggregation_id_compress.go +++ b/aggregation/id_compress.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package policy +package aggregation import ( "fmt" @@ -26,56 +26,56 @@ import ( "github.com/willf/bitset" ) -// AggregationIDCompressor can compress AggregationTypes into an AggregationID. -type AggregationIDCompressor interface { +// IDCompressor can compress Types into an ID. +type IDCompressor interface { // Compress compresses a set of aggregation types into an aggregation id. - Compress(aggTypes AggregationTypes) (AggregationID, error) + Compress(aggTypes Types) (ID, error) // MustCompress compresses a set of aggregation types into an aggregation id, // and panics if an error is encountered. - MustCompress(aggTypes AggregationTypes) AggregationID + MustCompress(aggTypes Types) ID } -// AggregationIDDecompressor can decompress AggregationID. -type AggregationIDDecompressor interface { +// IDDecompressor can decompress ID. +type IDDecompressor interface { // Decompress decompresses aggregation types, // returns error if any invalid aggregation type is encountered. - Decompress(compressed AggregationID) (AggregationTypes, error) + Decompress(compressed ID) (Types, error) } -type aggregationIDCompressor struct { +type idCompressor struct { bs *bitset.BitSet } -// NewAggregationIDCompressor returns a new AggregationIDCompressor. -func NewAggregationIDCompressor() AggregationIDCompressor { +// NewIDCompressor returns a new IDCompressor. +func NewIDCompressor() IDCompressor { // NB(cw): If we start to support more than 64 types, the library will // expand the underlying word list itself. - return &aggregationIDCompressor{ - bs: bitset.New(MaxAggregationTypeID), + return &idCompressor{ + bs: bitset.New(maxTypeID), } } -func (c *aggregationIDCompressor) Compress(aggTypes AggregationTypes) (AggregationID, error) { +func (c *idCompressor) Compress(aggTypes Types) (ID, error) { c.bs.ClearAll() for _, aggType := range aggTypes { if !aggType.IsValid() { - return DefaultAggregationID, fmt.Errorf("could not compress invalid AggregationType %v", aggType) + return DefaultID, fmt.Errorf("could not compress invalid Type %v", aggType) } c.bs.Set(uint(aggType.ID())) } codes := c.bs.Bytes() - var id AggregationID - // NB(cw) it's guaranteed that len(id) == len(codes) == AggregationIDLen, we need to copy + var id ID + // NB(cw) it's guaranteed that len(id) == len(codes) == IDLen, we need to copy // the words in bitset out because the bitset contains a slice internally. 
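+	// For example, compressing Types{Min, Max} sets bits 2 and 3 (the ids of
+	// Min and Max), so the resulting single-word ID is {0xC}.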
- for i := 0; i < AggregationIDLen; i++ { + for i := 0; i < IDLen; i++ { id[i] = codes[i] } return id, nil } -func (c *aggregationIDCompressor) MustCompress(aggTypes AggregationTypes) AggregationID { +func (c *idCompressor) MustCompress(aggTypes Types) ID { id, err := c.Compress(aggTypes) if err != nil { panic(fmt.Errorf("unable to compress %v: %v", aggTypes, err)) @@ -83,48 +83,48 @@ func (c *aggregationIDCompressor) MustCompress(aggTypes AggregationTypes) Aggreg return id } -type aggregationIDDecompressor struct { +type idDecompressor struct { bs *bitset.BitSet buf []uint64 - pool AggregationTypesPool + pool TypesPool } -// NewAggregationIDDecompressor returns a new AggregationIDDecompressor. -func NewAggregationIDDecompressor() AggregationIDDecompressor { - return NewPooledAggregationIDDecompressor(nil) +// NewIDDecompressor returns a new IDDecompressor. +func NewIDDecompressor() IDDecompressor { + return NewPooledIDDecompressor(nil) } -// NewPooledAggregationIDDecompressor returns a new pooled AggregationTypeDecompressor. -func NewPooledAggregationIDDecompressor(pool AggregationTypesPool) AggregationIDDecompressor { - bs := bitset.New(MaxAggregationTypeID) - return &aggregationIDDecompressor{ +// NewPooledIDDecompressor returns a new pooled TypeDecompressor. +func NewPooledIDDecompressor(pool TypesPool) IDDecompressor { + bs := bitset.New(maxTypeID) + return &idDecompressor{ bs: bs, buf: bs.Bytes(), pool: pool, } } -func (c *aggregationIDDecompressor) Decompress(id AggregationID) (AggregationTypes, error) { +func (c *idDecompressor) Decompress(id ID) (Types, error) { if id.IsDefault() { - return DefaultAggregationTypes, nil + return DefaultTypes, nil } - // NB(cw) it's guaranteed that len(c.buf) == len(id) == AggregationIDLen, we need to copy + // NB(cw) it's guaranteed that len(c.buf) == len(id) == IDLen, we need to copy // the words from id into a slice to be used in bitset. for i := range id { c.buf[i] = id[i] } - var res AggregationTypes + var res Types if c.pool == nil { - res = make(AggregationTypes, 0, MaxAggregationTypeID) + res = make(Types, 0, maxTypeID) } else { res = c.pool.Get() } for i, e := c.bs.NextSet(0); e; i, e = c.bs.NextSet(i + 1) { - aggType := AggregationType(i) + aggType := Type(i) if !aggType.IsValid() { - return DefaultAggregationTypes, fmt.Errorf("invalid AggregationType: %s", aggType.String()) + return DefaultTypes, fmt.Errorf("invalid Type: %s", aggType.String()) } res = append(res, aggType) diff --git a/policy/aggregation_id_compress_test.go b/aggregation/id_compress_test.go similarity index 57% rename from policy/aggregation_id_compress_test.go rename to aggregation/id_compress_test.go index dc5ad4d..fd84c9b 100644 --- a/policy/aggregation_id_compress_test.go +++ b/aggregation/id_compress_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package policy +package aggregation import ( "testing" @@ -28,27 +28,27 @@ import ( "github.com/stretchr/testify/require" ) -func TestAggregationIDCompressRoundTrip(t *testing.T) { +func TestIDCompressRoundTrip(t *testing.T) { testcases := []struct { - input AggregationTypes - result AggregationTypes + input Types + result Types expectErr bool }{ - {DefaultAggregationTypes, DefaultAggregationTypes, false}, - {[]AggregationType{UnknownAggregationType}, DefaultAggregationTypes, true}, - {[]AggregationType{Min, Max}, []AggregationType{Min, Max}, false}, - {[]AggregationType{Last}, []AggregationType{Last}, false}, - {[]AggregationType{P999, P9999}, []AggregationType{P999, P9999}, false}, - {[]AggregationType{1, 5, 9, 3, 2}, []AggregationType{1, 2, 3, 5, 9}, false}, + {DefaultTypes, DefaultTypes, false}, + {[]Type{UnknownType}, DefaultTypes, true}, + {[]Type{Min, Max}, []Type{Min, Max}, false}, + {[]Type{Last}, []Type{Last}, false}, + {[]Type{P999, P9999}, []Type{P999, P9999}, false}, + {[]Type{1, 5, 9, 3, 2}, []Type{1, 2, 3, 5, 9}, false}, // 50 is an unknown aggregation type. - {[]AggregationType{10, 50}, DefaultAggregationTypes, true}, + {[]Type{10, 50}, DefaultTypes, true}, } - p := NewAggregationTypesPool(pool.NewObjectPoolOptions().SetSize(1)) - p.Init(func() AggregationTypes { - return make(AggregationTypes, 0, MaxAggregationTypeID) + p := NewTypesPool(pool.NewObjectPoolOptions().SetSize(1)) + p.Init(func() Types { + return make(Types, 0, maxTypeID) }) - compressor, decompressor := NewAggregationIDCompressor(), NewPooledAggregationIDDecompressor(p) + compressor, decompressor := NewIDCompressor(), NewPooledIDDecompressor(p) for _, test := range testcases { codes, err := compressor.Compress(test.input) if test.expectErr { @@ -61,12 +61,12 @@ func TestAggregationIDCompressRoundTrip(t *testing.T) { } } -func TestAggregationIDDecompressError(t *testing.T) { - compressor, decompressor := NewAggregationIDCompressor(), NewAggregationIDDecompressor() - _, err := decompressor.Decompress([AggregationIDLen]uint64{1}) // aggregation type: UnknownAggregationType. +func TestIDDecompressError(t *testing.T) { + compressor, decompressor := NewIDCompressor(), NewIDDecompressor() + _, err := decompressor.Decompress([IDLen]uint64{1}) require.Error(t, err) - max, err := compressor.Compress([]AggregationType{Last, Min, Max, Mean, Median, Count, Sum, SumSq, Stdev, P95, P99, P999, P9999}) + max, err := compressor.Compress([]Type{Last, Min, Max, Mean, Median, Count, Sum, SumSq, Stdev, P95, P99, P999, P9999}) require.NoError(t, err) max[0] = max[0] << 1 diff --git a/aggregation/type.go b/aggregation/type.go new file mode 100644 index 0000000..8cb63ab --- /dev/null +++ b/aggregation/type.go @@ -0,0 +1,403 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package aggregation
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/m3db/m3metrics/generated/proto/schema"
+	"github.com/m3db/m3x/pool"
+)
+
+// Supported aggregation types.
+const (
+	UnknownType Type = iota
+	Last
+	Min
+	Max
+	Mean
+	Median
+	Count
+	Sum
+	SumSq
+	Stdev
+	P10
+	P20
+	P30
+	P40
+	P50
+	P60
+	P70
+	P80
+	P90
+	P95
+	P99
+	P999
+	P9999
+
+	nextTypeID = iota
+)
+
+const (
+	// maxTypeID is the largest id of all the valid aggregation types.
+	// NB(cw) maxTypeID is guaranteed to be greater than or equal
+	// to len(ValidTypes).
+	// If and only if the ids of all the valid aggregation types are
+	// consecutive, maxTypeID == len(ValidTypes).
+	maxTypeID = nextTypeID - 1
+
+	typesSeparator = ","
+)
+
+var (
+	emptyStruct struct{}
+
+	// DefaultTypes is a default list of aggregation types.
+	DefaultTypes Types
+
+	// DefaultID is a default ID.
+	DefaultID ID
+
+	// ValidTypes is the set of all the valid aggregation types.
+	ValidTypes = map[Type]struct{}{
+		Last:   emptyStruct,
+		Min:    emptyStruct,
+		Max:    emptyStruct,
+		Mean:   emptyStruct,
+		Median: emptyStruct,
+		Count:  emptyStruct,
+		Sum:    emptyStruct,
+		SumSq:  emptyStruct,
+		Stdev:  emptyStruct,
+		P10:    emptyStruct,
+		P20:    emptyStruct,
+		P30:    emptyStruct,
+		P40:    emptyStruct,
+		P50:    emptyStruct,
+		P60:    emptyStruct,
+		P70:    emptyStruct,
+		P80:    emptyStruct,
+		P90:    emptyStruct,
+		P95:    emptyStruct,
+		P99:    emptyStruct,
+		P999:   emptyStruct,
+		P9999:  emptyStruct,
+	}
+
+	typeStringMap map[string]Type
+)
+
+// Type defines an aggregation function.
+type Type int
+
+// NewTypeFromSchema creates an aggregation type from a schema.
+func NewTypeFromSchema(input schema.AggregationType) (Type, error) {
+	aggType := Type(input)
+	if !aggType.IsValid() {
+		return UnknownType, fmt.Errorf("invalid aggregation type from schema: %s", input)
+	}
+	return aggType, nil
+}
+
+// ID returns the id of the Type.
+func (a Type) ID() int {
+	return int(a)
+}
+
+// IsValid checks if a Type is valid.
+func (a Type) IsValid() bool {
+	_, ok := ValidTypes[a]
+	return ok
+}
+
+// IsValidForGauge checks if a Type is valid for Gauge.
+func (a Type) IsValidForGauge() bool {
+	switch a {
+	case Last, Min, Max, Mean, Count, Sum, SumSq, Stdev:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsValidForCounter checks if a Type is valid for Counter.
+func (a Type) IsValidForCounter() bool {
+	switch a {
+	case Min, Max, Mean, Count, Sum, SumSq, Stdev:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsValidForTimer checks if a Type is valid for Timer.
+func (a Type) IsValidForTimer() bool {
+	switch a {
+	case Last:
+		return false
+	default:
+		return true
+	}
+}
+
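+// Quantile returns the quantile represented by the Type.
+// For example, P99.Quantile() returns (0.99, true), while Count.Quantile()
+// returns (0, false). Median and P50 both represent the 0.5 quantile,
+// which is why PooledQuantiles dedupes them.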
+func (a Type) Quantile() (float64, bool) { + switch a { + case P10: + return 0.1, true + case P20: + return 0.2, true + case P30: + return 0.3, true + case P40: + return 0.4, true + case P50, Median: + return 0.5, true + case P60: + return 0.6, true + case P70: + return 0.7, true + case P80: + return 0.8, true + case P90: + return 0.9, true + case P95: + return 0.95, true + case P99: + return 0.99, true + case P999: + return 0.999, true + case P9999: + return 0.9999, true + default: + return 0, false + } +} + +// Schema returns the schema of the aggregation type. +func (a Type) Schema() (schema.AggregationType, error) { + s := schema.AggregationType(a) + if err := validateSchemaType(s); err != nil { + return schema.AggregationType_UNKNOWN, err + } + return s, nil +} + +// UnmarshalYAML unmarshals aggregation type from a string. +func (a *Type) UnmarshalYAML(unmarshal func(interface{}) error) error { + var str string + if err := unmarshal(&str); err != nil { + return err + } + + parsed, err := ParseType(str) + if err != nil { + return err + } + *a = parsed + return nil +} + +func validateSchemaType(a schema.AggregationType) error { + _, ok := schema.AggregationType_name[int32(a)] + if !ok { + return fmt.Errorf("invalid schema aggregation type: %v", a) + } + return nil +} + +// ParseType parses an aggregation type. +func ParseType(str string) (Type, error) { + aggType, ok := typeStringMap[str] + if !ok { + return UnknownType, fmt.Errorf("invalid aggregation type: %s", str) + } + return aggType, nil +} + +// Types is a list of Types. +type Types []Type + +// NewTypesFromSchema creates a list of aggregation types from a schema. +func NewTypesFromSchema(input []schema.AggregationType) (Types, error) { + res := make([]Type, len(input)) + for i, t := range input { + aggType, err := NewTypeFromSchema(t) + if err != nil { + return DefaultTypes, err + } + res[i] = aggType + } + return res, nil +} + +// UnmarshalYAML unmarshals aggregation types from a string. +func (aggTypes *Types) UnmarshalYAML(unmarshal func(interface{}) error) error { + var str string + if err := unmarshal(&str); err != nil { + return err + } + + parsed, err := ParseTypes(str) + if err != nil { + return err + } + *aggTypes = parsed + return nil +} + +// Contains checks if the given type is contained in the aggregation types. +func (aggTypes Types) Contains(aggType Type) bool { + for _, at := range aggTypes { + if at == aggType { + return true + } + } + return false +} + +// IsDefault checks if the Types is the default aggregation type. +func (aggTypes Types) IsDefault() bool { + return len(aggTypes) == 0 +} + +// String is for debugging. +func (aggTypes Types) String() string { + if len(aggTypes) == 0 { + return "" + } + + parts := make([]string, len(aggTypes)) + for i, aggType := range aggTypes { + parts[i] = aggType.String() + } + return strings.Join(parts, typesSeparator) +} + +// IsValidForGauge checks if the list of aggregation types is valid for Gauge. +func (aggTypes Types) IsValidForGauge() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForGauge() { + return false + } + } + return true +} + +// IsValidForCounter checks if the list of aggregation types is valid for Counter. +func (aggTypes Types) IsValidForCounter() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForCounter() { + return false + } + } + return true +} + +// IsValidForTimer checks if the list of aggregation types is valid for Timer. 
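+// For example, Types{Sum, Last}.IsValidForTimer() is false, because Last is
+// the one aggregation type that timers do not support.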
+func (aggTypes Types) IsValidForTimer() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForTimer() { + return false + } + } + return true +} + +// PooledQuantiles returns all the quantiles found in the list +// of aggregation types. Using a floats pool if available. +// +// A boolean will also be returned to indicate whether the +// returned float slice is from the pool. +func (aggTypes Types) PooledQuantiles(p pool.FloatsPool) ([]float64, bool) { + var ( + res []float64 + initialized bool + medianAdded bool + pooled bool + ) + for _, aggType := range aggTypes { + q, ok := aggType.Quantile() + if !ok { + continue + } + // Dedup P50 and Median. + if aggType == P50 || aggType == Median { + if medianAdded { + continue + } + medianAdded = true + } + if !initialized { + if p == nil { + res = make([]float64, 0, len(aggTypes)) + } else { + res = p.Get(len(aggTypes)) + pooled = true + } + initialized = true + } + res = append(res, q) + } + return res, pooled +} + +// Schema returns the schema of the aggregation types. +func (aggTypes Types) Schema() ([]schema.AggregationType, error) { + // This is the same as returning an empty slice from the functionality perspective. + // It makes creating testing fixtures much simpler. + if aggTypes == nil { + return nil, nil + } + + res := make([]schema.AggregationType, len(aggTypes)) + for i, aggType := range aggTypes { + s, err := aggType.Schema() + if err != nil { + return nil, err + } + res[i] = s + } + + return res, nil +} + +// ParseTypes parses a list of aggregation types in the form of type1,type2,type3. +func ParseTypes(str string) (Types, error) { + parts := strings.Split(str, typesSeparator) + res := make(Types, len(parts)) + for i := range parts { + aggType, err := ParseType(parts[i]) + if err != nil { + return nil, err + } + res[i] = aggType + } + return res, nil +} + +func init() { + typeStringMap = make(map[string]Type, maxTypeID) + for aggType := range ValidTypes { + typeStringMap[aggType.String()] = aggType + } +} diff --git a/policy/aggregation_type_config.go b/aggregation/type_config.go similarity index 79% rename from policy/aggregation_type_config.go rename to aggregation/type_config.go index 0f1a47a..f2b1916 100644 --- a/policy/aggregation_type_config.go +++ b/aggregation/type_config.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package policy +package aggregation import ( "fmt" @@ -27,28 +27,28 @@ import ( "github.com/m3db/m3x/pool" ) -// AggregationTypesConfiguration contains configuration for aggregation types. -type AggregationTypesConfiguration struct { +// TypesConfiguration contains configuration for aggregation types. +type TypesConfiguration struct { // Default aggregation types for counter metrics. - DefaultCounterAggregationTypes *AggregationTypes `yaml:"defaultCounterAggregationTypes"` + DefaultCounterAggregationTypes *Types `yaml:"defaultCounterAggregationTypes"` // Default aggregation types for timer metrics. - DefaultTimerAggregationTypes *AggregationTypes `yaml:"defaultTimerAggregationTypes"` + DefaultTimerAggregationTypes *Types `yaml:"defaultTimerAggregationTypes"` // Default aggregation types for gauge metrics. - DefaultGaugeAggregationTypes *AggregationTypes `yaml:"defaultGaugeAggregationTypes"` + DefaultGaugeAggregationTypes *Types `yaml:"defaultGaugeAggregationTypes"` // Global type string overrides. 
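+	// For example, an override of {Mean: "testMean"} combined with
+	// transformFnType: suffix emits ".testMean" as the Mean type string.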
- GlobalOverrides map[AggregationType]string `yaml:"globalOverrides"` + GlobalOverrides map[Type]string `yaml:"globalOverrides"` // Type string overrides for Counter. - CounterOverrides map[AggregationType]string `yaml:"counterOverrides"` + CounterOverrides map[Type]string `yaml:"counterOverrides"` // Type string overrides for Timer. - TimerOverrides map[AggregationType]string `yaml:"timerOverrides"` + TimerOverrides map[Type]string `yaml:"timerOverrides"` // Type string overrides for Gauge. - GaugeOverrides map[AggregationType]string `yaml:"gaugeOverrides"` + GaugeOverrides map[Type]string `yaml:"gaugeOverrides"` // TransformFnType configs the global type string transform function type. TransformFnType *transformFnType `yaml:"transformFnType"` @@ -61,8 +61,8 @@ type AggregationTypesConfiguration struct { } // NewOptions creates a new Option. -func (c AggregationTypesConfiguration) NewOptions(instrumentOpts instrument.Options) (AggregationTypesOptions, error) { - opts := NewAggregationTypesOptions() +func (c TypesConfiguration) NewOptions(instrumentOpts instrument.Options) (TypesOptions, error) { + opts := NewTypesOptions() if c.TransformFnType != nil { fn, err := c.TransformFnType.TransformFn() if err != nil { @@ -91,10 +91,10 @@ func (c AggregationTypesConfiguration) NewOptions(instrumentOpts instrument.Opti // Set aggregation types pool. iOpts := instrumentOpts.SetMetricsScope(scope.SubScope("aggregation-types-pool")) aggTypesPoolOpts := c.AggregationTypesPool.NewObjectPoolOptions(iOpts) - aggTypesPool := NewAggregationTypesPool(aggTypesPoolOpts) - opts = opts.SetAggregationTypesPool(aggTypesPool) - aggTypesPool.Init(func() AggregationTypes { - return make(AggregationTypes, 0, len(ValidAggregationTypes)) + aggTypesPool := NewTypesPool(aggTypesPoolOpts) + opts = opts.SetTypesPool(aggTypesPool) + aggTypesPool.Init(func() Types { + return make(Types, 0, len(ValidTypes)) }) // Set quantiles pool. @@ -112,8 +112,8 @@ func (c AggregationTypesConfiguration) NewOptions(instrumentOpts instrument.Opti return opts, nil } -func parseTypeStringOverride(m map[AggregationType]string) map[AggregationType][]byte { - res := make(map[AggregationType][]byte, len(m)) +func parseTypeStringOverride(m map[Type]string) map[Type][]byte { + res := make(map[Type][]byte, len(m)) for aggType, s := range m { var bytes []byte if s != "" { diff --git a/policy/aggregation_type_config_test.go b/aggregation/type_config_test.go similarity index 83% rename from policy/aggregation_type_config_test.go rename to aggregation/type_config_test.go index 6e44550..c97c344 100644 --- a/policy/aggregation_type_config_test.go +++ b/aggregation/type_config_test.go @@ -18,22 +18,23 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package policy +package aggregation import ( "strings" "testing" "github.com/m3db/m3x/instrument" + "github.com/stretchr/testify/require" yaml "gopkg.in/yaml.v2" ) -func TestAggregationTypesConfiguration(t *testing.T) { +func TestTypesConfiguration(t *testing.T) { str := ` defaultGaugeAggregationTypes: Max defaultTimerAggregationTypes: P50,P99,P9999 -globalOverrides: +globalOverrides: Mean: testMean gaugeOverrides: Last: "" @@ -42,13 +43,13 @@ counterOverrides: transformFnType: suffix ` - var cfg AggregationTypesConfiguration + var cfg TypesConfiguration require.NoError(t, yaml.Unmarshal([]byte(str), &cfg)) opts, err := cfg.NewOptions(instrument.NewOptions()) require.NoError(t, err) require.Equal(t, defaultDefaultCounterAggregationTypes, opts.DefaultCounterAggregationTypes()) - require.Equal(t, AggregationTypes{Max}, opts.DefaultGaugeAggregationTypes()) - require.Equal(t, AggregationTypes{P50, P99, P9999}, opts.DefaultTimerAggregationTypes()) + require.Equal(t, Types{Max}, opts.DefaultGaugeAggregationTypes()) + require.Equal(t, Types{P50, P99, P9999}, opts.DefaultTimerAggregationTypes()) require.Equal(t, []byte(".testMean"), opts.TypeStringForCounter(Mean)) require.Equal(t, []byte(nil), opts.TypeStringForCounter(Sum)) require.Equal(t, []byte(nil), opts.TypeStringForGauge(Last)) @@ -59,11 +60,11 @@ transformFnType: suffix } } -func TestAggregationTypesConfigNoTransformFnType(t *testing.T) { +func TestTypesConfigurationNoTransformFnType(t *testing.T) { str := ` defaultGaugeAggregationTypes: Max defaultTimerAggregationTypes: P50,P99,P9999 -globalOverrides: +globalOverrides: Mean: testMean gaugeOverrides: Last: "" @@ -71,19 +72,19 @@ counterOverrides: Sum: "" ` - var cfg AggregationTypesConfiguration + var cfg TypesConfiguration require.NoError(t, yaml.Unmarshal([]byte(str), &cfg)) _, err := cfg.NewOptions(instrument.NewOptions()) require.NoError(t, err) } -func TestAggregationTypesConfigurationError(t *testing.T) { +func TestTypesConfigurationError(t *testing.T) { str := ` defaultGaugeAggregationTypes: Max defaultTimerAggregationTypes: P50,P99,P9999 transformFnType: bla ` - var cfg AggregationTypesConfiguration + var cfg TypesConfiguration require.Error(t, yaml.Unmarshal([]byte(str), &cfg)) } diff --git a/aggregation/type_string.go b/aggregation/type_string.go new file mode 100644 index 0000000..2fbd63e --- /dev/null +++ b/aggregation/type_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Type; DO NOT EDIT + +package aggregation + +import "fmt" + +const _Type_name = "UnknownTypeLastMinMaxMeanMedianCountSumSumSqStdevP10P20P30P40P50P60P70P80P90P95P99P999P9999" + +var _Type_index = [...]uint8{0, 11, 15, 18, 21, 25, 31, 36, 39, 44, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76, 79, 82, 86, 91} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return fmt.Sprintf("Type(%d)", i) + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/policy/aggregation_type_test.go b/aggregation/type_test.go similarity index 60% rename from policy/aggregation_type_test.go rename to aggregation/type_test.go index 8da3114..e7cd6b1 100644 --- a/policy/aggregation_type_test.go +++ b/aggregation/type_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package policy +package aggregation import ( "testing" @@ -29,21 +29,21 @@ import ( yaml "gopkg.in/yaml.v2" ) -func TestAggregationTypeIsValid(t *testing.T) { +func TestTypeIsValid(t *testing.T) { require.True(t, P9999.IsValid()) - require.False(t, AggregationType(int(P9999)+1).IsValid()) + require.False(t, Type(int(P9999)+1).IsValid()) } -func TestAggregationTypeMaxID(t *testing.T) { - require.Equal(t, MaxAggregationTypeID, P9999.ID()) - require.Equal(t, P9999, AggregationType(MaxAggregationTypeID)) - require.Equal(t, MaxAggregationTypeID, len(ValidAggregationTypes)) +func TestTypeMaxID(t *testing.T) { + require.Equal(t, maxTypeID, P9999.ID()) + require.Equal(t, P9999, Type(maxTypeID)) + require.Equal(t, maxTypeID, len(ValidTypes)) } -func TestAggregationTypeUnmarshalYAML(t *testing.T) { +func TestTypeUnmarshalYAML(t *testing.T) { inputs := []struct { str string - expected AggregationType + expected Type expectedErr bool }{ { @@ -60,7 +60,7 @@ func TestAggregationTypeUnmarshalYAML(t *testing.T) { }, } for _, input := range inputs { - var aggtype AggregationType + var aggtype Type err := yaml.Unmarshal([]byte(input.str), &aggtype) if input.expectedErr { @@ -73,25 +73,25 @@ func TestAggregationTypeUnmarshalYAML(t *testing.T) { } } -func TestAggregationTypesIsDefault(t *testing.T) { - require.True(t, DefaultAggregationTypes.IsDefault()) +func TestTypesIsDefault(t *testing.T) { + require.True(t, DefaultTypes.IsDefault()) - require.False(t, AggregationTypes{Max}.IsDefault()) + require.False(t, Types{Max}.IsDefault()) } -func TestAggregationTypesUnmarshalYAML(t *testing.T) { +func TestTypesUnmarshalYAML(t *testing.T) { inputs := []struct { str string - expected AggregationTypes + expected Types expectedErr bool }{ { str: "Min", - expected: AggregationTypes{Min}, + expected: Types{Min}, }, { str: "Mean,Max,P99,P9999", - expected: AggregationTypes{Mean, Max, P99, P9999}, + expected: Types{Mean, Max, P99, P9999}, }, { str: "Min,Max,P99,P9999,P100", @@ -115,7 +115,7 @@ func TestAggregationTypesUnmarshalYAML(t *testing.T) { }, } for _, input := range inputs { - var aggtypes AggregationTypes + var aggtypes Types err := yaml.Unmarshal([]byte(input.str), &aggtypes) if input.expectedErr { @@ -128,29 +128,29 @@ func TestAggregationTypesUnmarshalYAML(t *testing.T) { } } -func TestParseAggregationTypes(t *testing.T) { +func TestParseTypes(t *testing.T) { inputs := []struct { str string - expected AggregationTypes + expected Types }{ { str: "Min", - expected: AggregationTypes{Min}, + expected: Types{Min}, }, { str: "Min,Max", - expected: AggregationTypes{Min, Max}, + expected: Types{Min, Max}, }, } for _, input := range inputs { - res, err := ParseAggregationTypes(input.str) + res, err := ParseTypes(input.str) require.NoError(t, err) require.Equal(t, input.expected, res) } } func TestQuantiles(t *testing.T) { - res, ok := AggregationTypes{Median, P95, P99}.PooledQuantiles(nil) + res, ok := Types{Median, P95, P99}.PooledQuantiles(nil) require.Equal(t, []float64{0.5, 0.95, 0.99}, res) require.False(t, ok) @@ -161,40 +161,41 @@ func TestQuantiles(t *testing.T) { nil, ) p.Init() - res, ok = AggregationTypes{Median, P95, P99}.PooledQuantiles(p) + res, ok = Types{Median, P95, P99}.PooledQuantiles(p) require.Equal(t, []float64{0.5, 0.95, 0.99}, res) require.True(t, ok) p.Put(res) - res2, ok := AggregationTypes{P90, P95, P99}.PooledQuantiles(p) + res2, ok := Types{P90, P95, P99}.PooledQuantiles(p) require.Equal(t, []float64{0.9, 0.95, 0.99}, res2) require.Equal(t, res, res2) require.True(t, ok) p.Put(res2) - res3, 
ok := AggregationTypes{Count}.PooledQuantiles(p) + res3, ok := Types{Count}.PooledQuantiles(p) require.Nil(t, res3) require.False(t, ok) - res4, ok := AggregationTypes{P10, P20, P30, P40, P50, Median, P60, P70, P80, P90, P95, P99, P999, P9999}.PooledQuantiles(p) + res4, ok := Types{P10, P20, P30, P40, P50, Median, P60, P70, P80, P90, P95, P99, P999, P9999}.PooledQuantiles(p) require.Equal(t, []float64{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999}, res4) require.True(t, ok) } -func TestAggregationIDContains(t *testing.T) { - require.True(t, MustCompressAggregationTypes(P99).Contains(P99)) - require.True(t, MustCompressAggregationTypes(P99, P95).Contains(P99)) - require.True(t, MustCompressAggregationTypes(P99, P95).Contains(P95)) - require.True(t, MustCompressAggregationTypes(Sum, Last, P999).Contains(Sum)) - require.True(t, MustCompressAggregationTypes(Sum, Last, P999).Contains(Last)) - require.True(t, MustCompressAggregationTypes(Sum, Last, P999).Contains(P999)) - require.False(t, MustCompressAggregationTypes(Sum, Last, P999).Contains(P9999)) - require.False(t, MustCompressAggregationTypes().Contains(P99)) - require.False(t, MustCompressAggregationTypes(P99, P95).Contains(P9999)) +func TestIDContains(t *testing.T) { + require.True(t, MustCompressTypes(P99).Contains(P99)) + require.True(t, MustCompressTypes(P99, P95).Contains(P99)) + require.True(t, MustCompressTypes(P99, P95).Contains(P95)) + require.True(t, MustCompressTypes(Sum, Last, P999).Contains(Sum)) + require.True(t, MustCompressTypes(Sum, Last, P999).Contains(Last)) + require.True(t, MustCompressTypes(Sum, Last, P999).Contains(P999)) + require.False(t, MustCompressTypes(Sum, Last, P999).Contains(P9999)) + require.False(t, MustCompressTypes().Contains(P99)) + require.False(t, MustCompressTypes(P99, P95).Contains(P9999)) } -func TestCompressedAggregationTypesIsDefault(t *testing.T) { - var id AggregationID + +func TestCompressedTypesIsDefault(t *testing.T) { + var id ID require.True(t, id.IsDefault()) id[0] = 8 diff --git a/policy/aggregation_type_options.go b/aggregation/types_options.go similarity index 65% rename from policy/aggregation_type_options.go rename to aggregation/types_options.go index d93fae8..e2da41a 100644 --- a/policy/aggregation_type_options.go +++ b/aggregation/types_options.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package policy +package aggregation import ( "bytes" @@ -36,33 +36,33 @@ type QuantileTypeStringFn func(quantile float64) []byte // TypeStringTransformFn transforms the type string. type TypeStringTransformFn func(typeString []byte) []byte -// AggregationTypesOptions provides a set of options for aggregation types. -type AggregationTypesOptions interface { +// TypesOptions provides a set of options for aggregation types. +type TypesOptions interface { // Validate checks if the options are valid. Validate() error // Read-Write methods. // SetDefaultCounterAggregationTypes sets the default aggregation types for counters. - SetDefaultCounterAggregationTypes(value AggregationTypes) AggregationTypesOptions + SetDefaultCounterAggregationTypes(value Types) TypesOptions // DefaultCounterAggregationTypes returns the default aggregation types for counters. - DefaultCounterAggregationTypes() AggregationTypes + DefaultCounterAggregationTypes() Types // SetDefaultTimerAggregationTypes sets the default aggregation types for timers. 
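+	// Note that this also recomputes the timer quantiles exposed by
+	// TimerQuantiles().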
- SetDefaultTimerAggregationTypes(value AggregationTypes) AggregationTypesOptions + SetDefaultTimerAggregationTypes(value Types) TypesOptions // DefaultTimerAggregationTypes returns the default aggregation types for timers. - DefaultTimerAggregationTypes() AggregationTypes + DefaultTimerAggregationTypes() Types // SetDefaultGaugeAggregationTypes sets the default aggregation types for gauges. - SetDefaultGaugeAggregationTypes(value AggregationTypes) AggregationTypesOptions + SetDefaultGaugeAggregationTypes(value Types) TypesOptions // DefaultGaugeAggregationTypes returns the default aggregation types for gauges. - DefaultGaugeAggregationTypes() AggregationTypes + DefaultGaugeAggregationTypes() Types // SetTimerQuantileTypeStringFn sets the quantile type string function for timers. - SetTimerQuantileTypeStringFn(value QuantileTypeStringFn) AggregationTypesOptions + SetTimerQuantileTypeStringFn(value QuantileTypeStringFn) TypesOptions // TimerQuantileTypeStringFn returns the quantile type string function for timers. TimerQuantileTypeStringFn() QuantileTypeStringFn @@ -70,19 +70,19 @@ type AggregationTypesOptions interface { // SetGlobalTypeStringTransformFn sets the type string transform functions. // The GlobalTypeStringTransformFn will only be applied to the global type strings, it // will NOT be applied to the metric type specific overrides. - SetGlobalTypeStringTransformFn(value TypeStringTransformFn) AggregationTypesOptions + SetGlobalTypeStringTransformFn(value TypeStringTransformFn) TypesOptions // GlobalTypeStringTransformFn returns the global type string transform functions. GlobalTypeStringTransformFn() TypeStringTransformFn - // SetAggregationTypesPool sets the aggregation types pool. - SetAggregationTypesPool(pool AggregationTypesPool) AggregationTypesOptions + // SetTypesPool sets the aggregation types pool. + SetTypesPool(pool TypesPool) TypesOptions - // AggregationTypesPool returns the aggregation types pool. - AggregationTypesPool() AggregationTypesPool + // TypesPool returns the aggregation types pool. + TypesPool() TypesPool // SetQuantilesPool sets the timer quantiles pool. - SetQuantilesPool(pool pool.FloatsPool) AggregationTypesOptions + SetQuantilesPool(pool pool.FloatsPool) TypesOptions // QuantilesPool returns the timer quantiles pool. QuantilesPool() pool.FloatsPool @@ -91,16 +91,16 @@ type AggregationTypesOptions interface { // SetGlobalTypeStringOverrides sets the global type strings. // The GlobalTypeStringTransformFn will be applied to these type strings. - SetGlobalTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions + SetGlobalTypeStringOverrides(m map[Type][]byte) TypesOptions // SetCounterTypeStringOverrides sets the overrides for counter type strings. - SetCounterTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions + SetCounterTypeStringOverrides(m map[Type][]byte) TypesOptions // SetTimerTypeStringOverrides sets the overrides for timer type strings. - SetTimerTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions + SetTimerTypeStringOverrides(m map[Type][]byte) TypesOptions // SetGaugeTypeStringOverrides sets the overrides for gauge type strings. - SetGaugeTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions + SetGaugeTypeStringOverrides(m map[Type][]byte) TypesOptions // Read only methods. @@ -116,37 +116,37 @@ type AggregationTypesOptions interface { // default gauge aggregation types. 
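+	// With the default options this is [][]byte{nil}, because the gauge
+	// default (Last) is overridden to a nil type string.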
DefaultGaugeAggregationTypeStrings() [][]byte - // TypeString returns the type string for the aggregation type for counters. - TypeStringForCounter(value AggregationType) []byte + // TypeStringForCounter returns the type string for the aggregation type for counters. + TypeStringForCounter(value Type) []byte - // TypeString returns the type string for the aggregation type for timers. - TypeStringForTimer(value AggregationType) []byte + // TypeStringForTimer returns the type string for the aggregation type for timers. + TypeStringForTimer(value Type) []byte - // TypeString returns the type string for the aggregation type for gauges. - TypeStringForGauge(value AggregationType) []byte + // TypeStringForGauge returns the type string for the aggregation type for gauges. + TypeStringForGauge(value Type) []byte - // AggregationTypeForCounter returns the aggregation type with the given type string for counters. - AggregationTypeForCounter(value []byte) AggregationType + // TypeForCounter returns the aggregation type with the given type string for counters. + TypeForCounter(value []byte) Type - // AggregationTypeForTimer returns the aggregation type with the given type string for timers. - AggregationTypeForTimer(value []byte) AggregationType + // TypeForTimer returns the aggregation type with the given type string for timers. + TypeForTimer(value []byte) Type - // AggregationTypeForGauge returns the aggregation type with the given type string for gauges. - AggregationTypeForGauge(value []byte) AggregationType + // TypeForGauge returns the aggregation type with the given type string for gauges. + TypeForGauge(value []byte) Type // TimerQuantiles returns the quantiles for timers. TimerQuantiles() []float64 // IsContainedInDefaultAggregationTypes checks if the given aggregation type is // contained in the default aggregation types for the metric type. 
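+	// For example, with the default options,
+	// IsContainedInDefaultAggregationTypes(Last, metric.GaugeType) is true,
+	// since gauges default to Types{Last}.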
-	IsContainedInDefaultAggregationTypes(at AggregationType, mt metric.Type) bool
+	IsContainedInDefaultAggregationTypes(at Type, mt metric.Type) bool
 }
 
 var (
-	defaultDefaultCounterAggregationTypes = AggregationTypes{
+	defaultDefaultCounterAggregationTypes = Types{
 		Sum,
 	}
-	defaultDefaultTimerAggregationTypes = AggregationTypes{
+	defaultDefaultTimerAggregationTypes = Types{
 		Sum,
 		SumSq,
 		Mean,
@@ -159,7 +159,7 @@ var (
 		P95,
 		P99,
 	}
-	defaultDefaultGaugeAggregationTypes = AggregationTypes{
+	defaultDefaultGaugeAggregationTypes = Types{
 		Last,
 	}
 
@@ -174,22 +174,22 @@ var (
 	defaultStdevTypeString = []byte("stdev")
 	defaultMedianTypeString = []byte("median")
 
-	defaultCounterTypeStringOverride = map[AggregationType][]byte{
+	defaultCounterTypeStringOverride = map[Type][]byte{
 		Sum: nil,
 	}
-	defaultTimerTypeStringOverride = map[AggregationType][]byte{}
-	defaultGaugeTypeStringOverride = map[AggregationType][]byte{
+	defaultTimerTypeStringOverride = map[Type][]byte{}
+	defaultGaugeTypeStringOverride = map[Type][]byte{
 		Last: nil,
 	}
 )
 
 type options struct {
-	defaultCounterAggregationTypes AggregationTypes
-	defaultTimerAggregationTypes   AggregationTypes
-	defaultGaugeAggregationTypes   AggregationTypes
+	defaultCounterAggregationTypes Types
+	defaultTimerAggregationTypes   Types
+	defaultGaugeAggregationTypes   Types
 	timerQuantileTypeStringFn      QuantileTypeStringFn
 	globalTypeStringTransformFn    TypeStringTransformFn
-	aggTypesPool                   AggregationTypesPool
+	aggTypesPool                   TypesPool
 	quantilesPool                  pool.FloatsPool
 
 	defaultTypeStrings [][]byte
@@ -197,28 +197,28 @@ type options struct {
 	timerTypeStrings   [][]byte
 	gaugeTypeStrings   [][]byte
 
-	globalTypeStringOverrides map[AggregationType][]byte
-	counterTypeStringOverride map[AggregationType][]byte
-	timerTypeStringOverride   map[AggregationType][]byte
-	gaugeTypeStringOverride   map[AggregationType][]byte
+	globalTypeStringOverrides map[Type][]byte
+	counterTypeStringOverride map[Type][]byte
+	timerTypeStringOverride   map[Type][]byte
+	gaugeTypeStringOverride   map[Type][]byte
 
 	defaultCounterAggregationTypeStrings [][]byte
 	defaultTimerAggregationTypeStrings   [][]byte
 	defaultGaugeAggregationTypeStrings   [][]byte
 	timerQuantiles                       []float64
 }
 
-// NewAggregationTypesOptions returns a default AggregationTypesOptions.
+// NewTypesOptions returns a default TypesOptions.
+func NewTypesOptions() TypesOptions {
 	o := &options{
 		defaultCounterAggregationTypes: defaultDefaultCounterAggregationTypes,
 		defaultGaugeAggregationTypes:   defaultDefaultGaugeAggregationTypes,
 		defaultTimerAggregationTypes:   defaultDefaultTimerAggregationTypes,
 		timerQuantileTypeStringFn:      defaultTimerQuantileTypeStringFn,
 		globalTypeStringTransformFn:    noopTransformFn,
 		counterTypeStringOverride:      defaultCounterTypeStringOverride,
 		timerTypeStringOverride:        defaultTimerTypeStringOverride,
 		gaugeTypeStringOverride:        defaultGaugeTypeStringOverride,
 	}
 	o.initPools()
 	o.computeAllDerived()
@@ -226,9 +226,9 @@
 }
 
 func (o *options) initPools() {
-	o.aggTypesPool = NewAggregationTypesPool(nil)
-	o.aggTypesPool.Init(func() AggregationTypes {
-		return make(AggregationTypes, 0, len(ValidAggregationTypes))
+	o.aggTypesPool = NewTypesPool(nil)
+	o.aggTypesPool.Init(func() Types {
+		return make(Types, 0, len(ValidTypes))
 	})
 
 	o.quantilesPool = pool.NewFloatsPool(nil, nil)
@@ -251,25 +251,25 @@ func (o *options) ensureUniqueTypeString(typeStrings [][]byte, t metric.Type) er
 		s := string(typeString)
 		if existAggType, ok := m[s]; ok {
 			return fmt.Errorf("invalid options, found duplicated type string: '%s' for aggregation type %v and %v for metric type: %s",
-				s, AggregationType(aggType), AggregationType(existAggType), t.String())
+				s, Type(aggType), Type(existAggType), t.String())
 		}
 		m[s] = aggType
 	}
 	return nil
 }
 
-func (o *options) SetDefaultCounterAggregationTypes(aggTypes AggregationTypes) AggregationTypesOptions {
+func (o *options) SetDefaultCounterAggregationTypes(aggTypes Types) TypesOptions {
 	opts := *o
 	opts.defaultCounterAggregationTypes = aggTypes
 	opts.computeTypeStrings()
 	return &opts
 }
 
-func (o *options) DefaultCounterAggregationTypes() AggregationTypes {
+func (o *options) DefaultCounterAggregationTypes() Types {
 	return o.defaultCounterAggregationTypes
 }
 
-func (o *options) SetDefaultTimerAggregationTypes(aggTypes AggregationTypes) AggregationTypesOptions {
+func (o *options) SetDefaultTimerAggregationTypes(aggTypes Types) TypesOptions {
 	opts := *o
 	opts.defaultTimerAggregationTypes = aggTypes
 	opts.computeQuantiles()
@@ -277,22 +277,22 @@
 	return &opts
 }
 
-func (o *options) DefaultTimerAggregationTypes() AggregationTypes {
+func (o *options) DefaultTimerAggregationTypes() Types {
 	return o.defaultTimerAggregationTypes
 }
 
-func (o *options) SetDefaultGaugeAggregationTypes(aggTypes AggregationTypes) AggregationTypesOptions {
+func (o *options) SetDefaultGaugeAggregationTypes(aggTypes Types) TypesOptions {
 	opts := *o
 	opts.defaultGaugeAggregationTypes = aggTypes
 	opts.computeTypeStrings()
 	return &opts
 }
 
-func (o *options) DefaultGaugeAggregationTypes() AggregationTypes {
+func (o *options) DefaultGaugeAggregationTypes() Types {
 	return o.defaultGaugeAggregationTypes
 }
 
-func (o *options) SetTimerQuantileTypeStringFn(value QuantileTypeStringFn) AggregationTypesOptions {
+func (o *options) SetTimerQuantileTypeStringFn(value QuantileTypeStringFn) TypesOptions {
 	opts := *o
 	opts.timerQuantileTypeStringFn = value
 	opts.computeTypeStrings()
@@ -303,7 +303,7 @@
 	return o.timerQuantileTypeStringFn
 }
 
-func (o *options) SetGlobalTypeStringTransformFn(value TypeStringTransformFn) AggregationTypesOptions {
+func (o *options) SetGlobalTypeStringTransformFn(value TypeStringTransformFn) TypesOptions {
 	opts := *o
 	opts.globalTypeStringTransformFn = value
 	opts.computeTypeStrings()
@@ -314,17 +314,17 @@
 	return o.globalTypeStringTransformFn
 }
 
-func (o *options) SetAggregationTypesPool(pool AggregationTypesPool) AggregationTypesOptions {
+func (o *options) SetTypesPool(pool TypesPool) TypesOptions {
 	opts := *o
 	opts.aggTypesPool = pool
 	return &opts
 }
 
-func (o *options) AggregationTypesPool() AggregationTypesPool {
+func (o *options) TypesPool() TypesPool {
 	return o.aggTypesPool
 }
 
-func (o *options) SetQuantilesPool(pool pool.FloatsPool) AggregationTypesOptions {
+func (o *options) SetQuantilesPool(pool pool.FloatsPool) TypesOptions {
 	opts := *o
 	opts.quantilesPool = pool
 	return &opts
@@ -334,28 +334,28 @@
 	return o.quantilesPool
 }
 
-func (o *options) SetGlobalTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions {
+func (o *options) SetGlobalTypeStringOverrides(m map[Type][]byte) TypesOptions {
 	opts := *o
 	opts.globalTypeStringOverrides = m
 	opts.computeTypeStrings()
 	return &opts
 }
 
-func (o *options) SetCounterTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions {
+func (o *options) SetCounterTypeStringOverrides(m map[Type][]byte) TypesOptions {
 	opts := *o
 	opts.counterTypeStringOverride = m
 	opts.computeTypeStrings()
 	return &opts
 }
 
-func (o *options) SetTimerTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions {
+func (o *options) SetTimerTypeStringOverrides(m map[Type][]byte) TypesOptions {
 	opts := *o
 	opts.timerTypeStringOverride = m
 	opts.computeTypeStrings()
 	return &opts
 }
 
-func (o *options) SetGaugeTypeStringOverrides(m map[AggregationType][]byte) AggregationTypesOptions {
+func (o *options) SetGaugeTypeStringOverrides(m map[Type][]byte) TypesOptions {
 	opts := *o
 	opts.gaugeTypeStringOverride = m
 	opts.computeTypeStrings()
@@ -371,30 +371,30 @@
 }
 
 func (o *options) DefaultGaugeAggregationTypeStrings() [][]byte {
 	return o.defaultGaugeAggregationTypeStrings
 }
 
-func (o *options) TypeStringForCounter(aggType AggregationType) []byte {
+func (o *options) TypeStringForCounter(aggType Type) []byte {
 	return o.counterTypeStrings[aggType.ID()]
 }
 
-func (o *options) TypeStringForTimer(aggType AggregationType) []byte {
+func (o *options) TypeStringForTimer(aggType Type) []byte {
 	return o.timerTypeStrings[aggType.ID()]
 }
 
-func (o *options) TypeStringForGauge(aggType AggregationType) []byte {
+func (o *options) TypeStringForGauge(aggType Type) []byte {
 	return o.gaugeTypeStrings[aggType.ID()]
 }
 
-func (o *options) AggregationTypeForCounter(value []byte) AggregationType {
+func (o *options) TypeForCounter(value []byte) Type {
 	return aggregationTypeWithTypeString(value, o.counterTypeStrings)
 }
 
-func (o *options) AggregationTypeForTimer(value []byte) AggregationType {
+func (o *options) TypeForTimer(value []byte) Type {
 	return aggregationTypeWithTypeString(value, o.timerTypeStrings)
 }
 
-func (o *options) AggregationTypeForGauge(value []byte) AggregationType {
+func (o *options) TypeForGauge(value []byte) Type {
 	return aggregationTypeWithTypeString(value, o.gaugeTypeStrings)
 }
 
@@ -402,8 +402,8 @@ func (o *options) TimerQuantiles() []float64 {
 	return o.timerQuantiles
 }
 
-func (o *options) IsContainedInDefaultAggregationTypes(at AggregationType, mt metric.Type) bool {
-	var aggTypes AggregationTypes
+func (o *options) IsContainedInDefaultAggregationTypes(at Type, mt metric.Type) bool {
+	var aggTypes Types
 	switch mt {
 	case metric.CounterType:
 		aggTypes = o.DefaultCounterAggregationTypes()
@@ -416,13 +416,13 @@
 	return aggTypes.Contains(at)
 }
 
-func aggregationTypeWithTypeString(value []byte, typeStrings [][]byte) AggregationType {
+func aggregationTypeWithTypeString(value []byte, typeStrings [][]byte) Type {
 	for aggType, b := range typeStrings {
 		if bytes.Equal(b, value) {
-			return AggregationType(aggType)
+			return Type(aggType)
 		}
 	}
-	return UnknownAggregationType
+	return UnknownType
 }
 
 func (o *options) computeAllDerived() {
@@ -441,15 +441,15 @@ func (o *options) computeTypeStrings() {
 	o.computeTimerTypeStrings()
 	o.computeGaugeTypeStrings()
 	o.computeDefaultCounterAggregationTypeString()
 	o.computeDefaultTimerAggregationTypeString()
 	o.computeDefaultGaugeAggregationTypeString()
 }
 
 func (o *options) computeDefaultTypeStrings() {
-	o.defaultTypeStrings = make([][]byte, MaxAggregationTypeID+1)
-	o.defaultTypeStrings[UnknownAggregationType.ID()] = defaultUnknownTypeString
+	o.defaultTypeStrings = make([][]byte, maxTypeID+1)
+	o.defaultTypeStrings[UnknownType.ID()] = defaultUnknownTypeString
 	transformFn := o.GlobalTypeStringTransformFn()
-	for aggType := range ValidAggregationTypes {
+	for aggType := range ValidTypes {
 		var typeString []byte
 		switch aggType {
 		case Last:
@@ -496,10 +496,10 @@ func (o *options) computeGaugeTypeStrings() {
 	o.gaugeTypeStrings = o.computeOverrideTypeStrings(o.gaugeTypeStringOverride)
 }
 
-func (o options) computeOverrideTypeStrings(m map[AggregationType][]byte) [][]byte {
+func (o options) computeOverrideTypeStrings(m map[Type][]byte) [][]byte {
 	res := make([][]byte, len(o.defaultTypeStrings))
 	for aggType, defaultTypeString := range o.defaultTypeStrings {
-		if overrideTypeString, ok := m[AggregationType(aggType)]; ok {
+		if overrideTypeString, ok := m[Type(aggType)]; ok {
 			res[aggType] = overrideTypeString
 			continue
 		}
@@ -515,17 +515,17 @@ func (o *options) computeDefaultCounterAggregationTypeString() {
 	}
 }
 
 func (o *options) computeDefaultTimerAggregationTypeString() {
 	o.defaultTimerAggregationTypeStrings = make([][]byte, len(o.DefaultTimerAggregationTypes()))
 	for i, aggType := range o.DefaultTimerAggregationTypes() {
 		o.defaultTimerAggregationTypeStrings[i] = o.TypeStringForTimer(aggType)
 	}
 }
 
 func (o *options) computeDefaultGaugeAggregationTypeString() {
 	o.defaultGaugeAggregationTypeStrings = make([][]byte, len(o.DefaultGaugeAggregationTypes()))
 	for i, aggType := range o.DefaultGaugeAggregationTypes() {
 		o.defaultGaugeAggregationTypeStrings[i] = o.TypeStringForGauge(aggType)
 	}
}
diff --git a/policy/aggregation_type_options_test.go b/aggregation/types_options_test.go
similarity index 72%
rename from policy/aggregation_type_options_test.go
rename to aggregation/types_options_test.go
index bdf4170..29f52ec 100644
---
a/policy/aggregation_type_options_test.go +++ b/aggregation/types_options_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package policy +package aggregation import ( "fmt" @@ -26,11 +26,12 @@ import ( "testing" "github.com/m3db/m3x/pool" + "github.com/stretchr/testify/require" ) -func TestAggregationTypesOptionsValidateDefault(t *testing.T) { - o := NewAggregationTypesOptions() +func TestTypesOptionsValidateDefault(t *testing.T) { + o := NewTypesOptions() // Validate base options require.Equal(t, defaultDefaultCounterAggregationTypes, o.DefaultCounterAggregationTypes()) @@ -58,7 +59,7 @@ func TestAggregationTypesOptionsValidateDefault(t *testing.T) { require.Equal(t, [][]byte{nil}, o.DefaultGaugeAggregationTypeStrings()) } -func validateQuantiles(t *testing.T, o AggregationTypesOptions) { +func validateQuantiles(t *testing.T, o TypesOptions) { typeStringFn := o.TimerQuantileTypeStringFn() quantiles, _ := o.DefaultTimerAggregationTypes().PooledQuantiles(nil) require.Equal(t, o.TimerQuantiles(), quantiles) @@ -73,169 +74,169 @@ func validateQuantiles(t *testing.T, o AggregationTypesOptions) { } func TestOptionsSetDefaultCounterAggregationTypes(t *testing.T) { - aggTypes := AggregationTypes{Mean, SumSq} - o := NewAggregationTypesOptions().SetDefaultCounterAggregationTypes(aggTypes) + aggTypes := Types{Mean, SumSq} + o := NewTypesOptions().SetDefaultCounterAggregationTypes(aggTypes) require.Equal(t, aggTypes, o.DefaultCounterAggregationTypes()) require.Equal(t, [][]byte{[]byte(defaultMeanTypeString), []byte(defaultSumSqTypeString)}, o.DefaultCounterAggregationTypeStrings()) } func TestOptionsSetDefaultTimerAggregationTypes(t *testing.T) { - aggTypes := AggregationTypes{Mean, SumSq, P99, P9999} - o := NewAggregationTypesOptions().SetDefaultTimerAggregationTypes(aggTypes) + aggTypes := Types{Mean, SumSq, P99, P9999} + o := NewTypesOptions().SetDefaultTimerAggregationTypes(aggTypes) require.Equal(t, aggTypes, o.DefaultTimerAggregationTypes()) require.Equal(t, []float64{0.99, 0.9999}, o.TimerQuantiles()) require.Equal(t, [][]byte{[]byte(defaultMeanTypeString), []byte(defaultSumSqTypeString), []byte("p99"), []byte("p9999")}, o.DefaultTimerAggregationTypeStrings()) } func TestOptionsSetDefaultGaugeAggregationTypes(t *testing.T) { - aggTypes := AggregationTypes{Mean, SumSq} - o := NewAggregationTypesOptions().SetDefaultGaugeAggregationTypes(aggTypes) + aggTypes := Types{Mean, SumSq} + o := NewTypesOptions().SetDefaultGaugeAggregationTypes(aggTypes) require.Equal(t, aggTypes, o.DefaultGaugeAggregationTypes()) require.Equal(t, [][]byte{[]byte(defaultMeanTypeString), []byte(defaultSumSqTypeString)}, o.DefaultGaugeAggregationTypeStrings()) } func TestOptionsSetTimerSumSqTypeString(t *testing.T) { newSumSqTypeString := []byte("testTimerSumSqTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{SumSq}). - SetDefaultTimerAggregationTypes(AggregationTypes{SumSq}). - SetDefaultGaugeAggregationTypes(AggregationTypes{SumSq}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{SumSq: newSumSqTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{SumSq}). + SetDefaultTimerAggregationTypes(Types{SumSq}). + SetDefaultGaugeAggregationTypes(Types{SumSq}). 
+ SetGlobalTypeStringOverrides(map[Type][]byte{SumSq: newSumSqTypeString}) require.Equal(t, newSumSqTypeString, o.TypeStringForCounter(SumSq)) require.Equal(t, newSumSqTypeString, o.TypeStringForTimer(SumSq)) require.Equal(t, newSumSqTypeString, o.TypeStringForGauge(SumSq)) require.Equal(t, [][]byte{[]byte(newSumSqTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newSumSqTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newSumSqTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, SumSq, o.AggregationTypeForCounter([]byte("testTimerSumSqTypeString"))) - require.Equal(t, SumSq, o.AggregationTypeForTimer([]byte("testTimerSumSqTypeString"))) - require.Equal(t, SumSq, o.AggregationTypeForGauge([]byte("testTimerSumSqTypeString"))) + require.Equal(t, SumSq, o.TypeForCounter([]byte("testTimerSumSqTypeString"))) + require.Equal(t, SumSq, o.TypeForTimer([]byte("testTimerSumSqTypeString"))) + require.Equal(t, SumSq, o.TypeForGauge([]byte("testTimerSumSqTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetTimerMeanTypeString(t *testing.T) { newMeanTypeString := []byte("testTimerMeanTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Mean}). - SetDefaultTimerAggregationTypes(AggregationTypes{Mean}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Mean}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Mean: newMeanTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Mean}). + SetDefaultTimerAggregationTypes(Types{Mean}). + SetDefaultGaugeAggregationTypes(Types{Mean}). + SetGlobalTypeStringOverrides(map[Type][]byte{Mean: newMeanTypeString}) require.Equal(t, newMeanTypeString, o.TypeStringForCounter(Mean)) require.Equal(t, newMeanTypeString, o.TypeStringForTimer(Mean)) require.Equal(t, newMeanTypeString, o.TypeStringForGauge(Mean)) require.Equal(t, [][]byte{[]byte(newMeanTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newMeanTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newMeanTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Mean, o.AggregationTypeForCounter([]byte("testTimerMeanTypeString"))) - require.Equal(t, Mean, o.AggregationTypeForTimer([]byte("testTimerMeanTypeString"))) - require.Equal(t, Mean, o.AggregationTypeForGauge([]byte("testTimerMeanTypeString"))) + require.Equal(t, Mean, o.TypeForCounter([]byte("testTimerMeanTypeString"))) + require.Equal(t, Mean, o.TypeForTimer([]byte("testTimerMeanTypeString"))) + require.Equal(t, Mean, o.TypeForGauge([]byte("testTimerMeanTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetCounterSumTypeString(t *testing.T) { newSumTypeString := []byte("testSumTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Sum}). - SetDefaultTimerAggregationTypes(AggregationTypes{Sum}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Sum}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Sum: newSumTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Sum}). + SetDefaultTimerAggregationTypes(Types{Sum}). + SetDefaultGaugeAggregationTypes(Types{Sum}). 
+ SetGlobalTypeStringOverrides(map[Type][]byte{Sum: newSumTypeString}) require.Equal(t, []byte(nil), o.TypeStringForCounter(Sum)) require.Equal(t, newSumTypeString, o.TypeStringForTimer(Sum)) require.Equal(t, newSumTypeString, o.TypeStringForGauge(Sum)) require.Equal(t, [][]byte{[]byte(nil)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newSumTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newSumTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Sum, o.AggregationTypeForCounter([]byte(nil))) - require.Equal(t, Sum, o.AggregationTypeForTimer([]byte("testSumTypeString"))) - require.Equal(t, Sum, o.AggregationTypeForGauge([]byte("testSumTypeString"))) + require.Equal(t, Sum, o.TypeForCounter([]byte(nil))) + require.Equal(t, Sum, o.TypeForTimer([]byte("testSumTypeString"))) + require.Equal(t, Sum, o.TypeForGauge([]byte("testSumTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetGaugeLastTypeString(t *testing.T) { newLastTypeString := []byte("testLastTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Last}). - SetDefaultTimerAggregationTypes(AggregationTypes{Last}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Last}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Last: newLastTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Last}). + SetDefaultTimerAggregationTypes(Types{Last}). + SetDefaultGaugeAggregationTypes(Types{Last}). + SetGlobalTypeStringOverrides(map[Type][]byte{Last: newLastTypeString}) require.Equal(t, newLastTypeString, o.TypeStringForCounter(Last)) require.Equal(t, newLastTypeString, o.TypeStringForTimer(Last)) require.Equal(t, []byte(nil), o.TypeStringForGauge(Last)) require.Equal(t, [][]byte{[]byte(newLastTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newLastTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(nil)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Last, o.AggregationTypeForCounter([]byte("testLastTypeString"))) - require.Equal(t, Last, o.AggregationTypeForTimer([]byte("testLastTypeString"))) - require.Equal(t, Last, o.AggregationTypeForGauge([]byte(nil))) + require.Equal(t, Last, o.TypeForCounter([]byte("testLastTypeString"))) + require.Equal(t, Last, o.TypeForTimer([]byte("testLastTypeString"))) + require.Equal(t, Last, o.TypeForGauge([]byte(nil))) require.NoError(t, o.Validate()) } func TestOptionsSetTimerCountTypeString(t *testing.T) { newCountTypeString := []byte("testTimerCountTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Count}). - SetDefaultTimerAggregationTypes(AggregationTypes{Count}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Count}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Count: newCountTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Count}). + SetDefaultTimerAggregationTypes(Types{Count}). + SetDefaultGaugeAggregationTypes(Types{Count}). 
+ SetGlobalTypeStringOverrides(map[Type][]byte{Count: newCountTypeString}) require.Equal(t, newCountTypeString, o.TypeStringForCounter(Count)) require.Equal(t, newCountTypeString, o.TypeStringForTimer(Count)) require.Equal(t, newCountTypeString, o.TypeStringForGauge(Count)) require.Equal(t, [][]byte{[]byte(newCountTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newCountTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newCountTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Count, o.AggregationTypeForCounter([]byte("testTimerCountTypeString"))) - require.Equal(t, Count, o.AggregationTypeForTimer([]byte("testTimerCountTypeString"))) - require.Equal(t, Count, o.AggregationTypeForGauge([]byte("testTimerCountTypeString"))) + require.Equal(t, Count, o.TypeForCounter([]byte("testTimerCountTypeString"))) + require.Equal(t, Count, o.TypeForTimer([]byte("testTimerCountTypeString"))) + require.Equal(t, Count, o.TypeForGauge([]byte("testTimerCountTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetTimerStdevTypeString(t *testing.T) { newStdevTypeString := []byte("testTimerStdevTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Stdev}). - SetDefaultTimerAggregationTypes(AggregationTypes{Stdev}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Stdev}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Stdev: newStdevTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Stdev}). + SetDefaultTimerAggregationTypes(Types{Stdev}). + SetDefaultGaugeAggregationTypes(Types{Stdev}). + SetGlobalTypeStringOverrides(map[Type][]byte{Stdev: newStdevTypeString}) require.Equal(t, newStdevTypeString, o.TypeStringForCounter(Stdev)) require.Equal(t, newStdevTypeString, o.TypeStringForTimer(Stdev)) require.Equal(t, newStdevTypeString, o.TypeStringForGauge(Stdev)) require.Equal(t, [][]byte{[]byte(newStdevTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newStdevTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newStdevTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Stdev, o.AggregationTypeForCounter([]byte("testTimerStdevTypeString"))) - require.Equal(t, Stdev, o.AggregationTypeForTimer([]byte("testTimerStdevTypeString"))) - require.Equal(t, Stdev, o.AggregationTypeForGauge([]byte("testTimerStdevTypeString"))) + require.Equal(t, Stdev, o.TypeForCounter([]byte("testTimerStdevTypeString"))) + require.Equal(t, Stdev, o.TypeForTimer([]byte("testTimerStdevTypeString"))) + require.Equal(t, Stdev, o.TypeForGauge([]byte("testTimerStdevTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetTimerMedianTypeString(t *testing.T) { newMedianTypeString := []byte("testTimerMedianTypeString") - o := NewAggregationTypesOptions(). - SetDefaultCounterAggregationTypes(AggregationTypes{Median}). - SetDefaultTimerAggregationTypes(AggregationTypes{Median}). - SetDefaultGaugeAggregationTypes(AggregationTypes{Median}). - SetGlobalTypeStringOverrides(map[AggregationType][]byte{Median: newMedianTypeString}) + o := NewTypesOptions(). + SetDefaultCounterAggregationTypes(Types{Median}). + SetDefaultTimerAggregationTypes(Types{Median}). + SetDefaultGaugeAggregationTypes(Types{Median}). 
+ SetGlobalTypeStringOverrides(map[Type][]byte{Median: newMedianTypeString}) require.Equal(t, newMedianTypeString, o.TypeStringForCounter(Median)) require.Equal(t, newMedianTypeString, o.TypeStringForTimer(Median)) require.Equal(t, newMedianTypeString, o.TypeStringForGauge(Median)) require.Equal(t, [][]byte{[]byte(newMedianTypeString)}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newMedianTypeString)}, o.DefaultTimerAggregationTypeStrings()) require.Equal(t, [][]byte{[]byte(newMedianTypeString)}, o.DefaultGaugeAggregationTypeStrings()) - require.Equal(t, Median, o.AggregationTypeForCounter([]byte("testTimerMedianTypeString"))) - require.Equal(t, Median, o.AggregationTypeForTimer([]byte("testTimerMedianTypeString"))) - require.Equal(t, Median, o.AggregationTypeForGauge([]byte("testTimerMedianTypeString"))) + require.Equal(t, Median, o.TypeForCounter([]byte("testTimerMedianTypeString"))) + require.Equal(t, Median, o.TypeForTimer([]byte("testTimerMedianTypeString"))) + require.Equal(t, Median, o.TypeForGauge([]byte("testTimerMedianTypeString"))) require.NoError(t, o.Validate()) } func TestOptionsSetTimerQuantileTypeStringFn(t *testing.T) { fn := func(q float64) []byte { return []byte(fmt.Sprintf("%1.2f", q)) } - o := NewAggregationTypesOptions().SetTimerQuantileTypeStringFn(fn) + o := NewTypesOptions().SetTimerQuantileTypeStringFn(fn) require.Equal(t, []byte("0.96"), o.TimerQuantileTypeStringFn()(0.9582)) validateQuantiles(t, o) } func TestOptionsCounterTypeString(t *testing.T) { - o := NewAggregationTypesOptions() + o := NewTypesOptions() require.Equal(t, []byte(defaultLastTypeString), o.TypeStringForCounter(Last)) require.Equal(t, []byte(defaultMinTypeString), o.TypeStringForCounter(Min)) require.Equal(t, []byte(defaultMaxTypeString), o.TypeStringForCounter(Max)) @@ -248,7 +249,7 @@ func TestOptionsCounterTypeString(t *testing.T) { } func TestOptionsTimerTypeString(t *testing.T) { - o := NewAggregationTypesOptions() + o := NewTypesOptions() require.Equal(t, []byte(defaultLastTypeString), o.TypeStringForTimer(Last)) require.Equal(t, []byte(defaultMinTypeString), o.TypeStringForTimer(Min)) require.Equal(t, []byte(defaultMaxTypeString), o.TypeStringForTimer(Max)) @@ -261,7 +262,7 @@ func TestOptionsTimerTypeString(t *testing.T) { } func TestOptionsGaugeTypeString(t *testing.T) { - o := NewAggregationTypesOptions() + o := NewTypesOptions() require.Equal(t, []byte(nil), o.TypeStringForGauge(Last)) require.Equal(t, []byte(defaultMinTypeString), o.TypeStringForGauge(Min)) require.Equal(t, []byte(defaultMaxTypeString), o.TypeStringForGauge(Max)) @@ -274,7 +275,7 @@ func TestOptionsGaugeTypeString(t *testing.T) { } func TestOptionsTypeStringTransform(t *testing.T) { - o := NewAggregationTypesOptions() + o := NewTypesOptions() for _, aggType := range o.DefaultTimerAggregationTypes() { require.False(t, strings.HasPrefix(string(o.TypeStringForTimer(aggType)), ".")) } @@ -284,12 +285,12 @@ func TestOptionsTypeStringTransform(t *testing.T) { require.True(t, strings.HasPrefix(string(o.TypeStringForTimer(aggType)), ".")) } - o = o.SetTimerTypeStringOverrides(map[AggregationType][]byte{P95: []byte("no_dot")}) + o = o.SetTimerTypeStringOverrides(map[Type][]byte{P95: []byte("no_dot")}) require.Equal(t, []byte("no_dot"), o.TypeStringForTimer(P95)) } func TestOptionTimerQuantileTypeString(t *testing.T) { - o := NewAggregationTypesOptions() + o := NewTypesOptions() cases := []struct { quantile float64 b []byte @@ -351,17 +352,17 @@ func TestOptionTimerQuantileTypeString(t 
*testing.T) { func TestSetQuantilesPool(t *testing.T) { p := pool.NewFloatsPool(nil, nil) - o := NewAggregationTypesOptions().SetQuantilesPool(p) + o := NewTypesOptions().SetQuantilesPool(p) require.Equal(t, p, o.QuantilesPool()) } func TestSetCounterTypeStringOverride(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Sum: nil, Mean: []byte("test"), } - o := NewAggregationTypesOptions().SetCounterTypeStringOverrides(m) + o := NewTypesOptions().SetCounterTypeStringOverrides(m) require.Equal(t, [][]byte{nil}, o.DefaultCounterAggregationTypeStrings()) require.Equal(t, []byte("test"), o.TypeStringForCounter(Mean)) require.Equal(t, []byte(defaultCountTypeString), o.TypeStringForCounter(Count)) @@ -369,26 +370,26 @@ func TestSetCounterTypeStringOverride(t *testing.T) { } func TestSetCounterTypeStringOverrideDuplicate(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Sum: nil, Mean: []byte("test"), Max: nil, } - o := NewAggregationTypesOptions().SetCounterTypeStringOverrides(m) + o := NewTypesOptions().SetCounterTypeStringOverrides(m) require.Equal(t, []byte(nil), o.TypeStringForCounter(Sum)) require.Equal(t, []byte(nil), o.TypeStringForCounter(Max)) require.Error(t, o.Validate()) } func TestSetTimerTypeStringOverride(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Min: []byte(defaultMinTypeString), Max: []byte(defaultMaxTypeString), Mean: []byte("test"), } - o := NewAggregationTypesOptions().SetTimerTypeStringOverrides(m) + o := NewTypesOptions().SetTimerTypeStringOverrides(m) require.Equal(t, []byte("test"), o.TypeStringForTimer(Mean)) require.Equal(t, []byte(defaultCountTypeString), o.TypeStringForTimer(Count)) require.Equal(t, []byte(defaultMinTypeString), o.TypeStringForTimer(Min)) @@ -397,26 +398,26 @@ func TestSetTimerTypeStringOverride(t *testing.T) { } func TestSetTimerTypeStringOverrideDuplicate(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Min: []byte(defaultMinTypeString), Max: []byte(defaultMaxTypeString), Mean: []byte("test"), Sum: []byte("test"), } - o := NewAggregationTypesOptions().SetTimerTypeStringOverrides(m) + o := NewTypesOptions().SetTimerTypeStringOverrides(m) require.Equal(t, []byte("test"), o.TypeStringForTimer(Mean)) require.Equal(t, []byte("test"), o.TypeStringForTimer(Sum)) require.Error(t, o.Validate()) } func TestSetGaugeTypeStringOverride(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Last: nil, Mean: []byte("test"), } - o := NewAggregationTypesOptions().SetGaugeTypeStringOverrides(m) + o := NewTypesOptions().SetGaugeTypeStringOverrides(m) require.Equal(t, [][]byte{nil}, o.DefaultGaugeAggregationTypeStrings()) require.Equal(t, []byte("test"), o.TypeStringForGauge(Mean)) require.Equal(t, []byte(nil), o.TypeStringForGauge(Last)) @@ -425,13 +426,13 @@ func TestSetGaugeTypeStringOverride(t *testing.T) { } func TestSetGaugeTypeStringOverrideDuplicate(t *testing.T) { - m := map[AggregationType][]byte{ + m := map[Type][]byte{ Last: nil, Mean: []byte("test"), Max: []byte("test"), } - o := NewAggregationTypesOptions().SetGaugeTypeStringOverrides(m) + o := NewTypesOptions().SetGaugeTypeStringOverrides(m) require.Equal(t, []byte("test"), o.TypeStringForGauge(Mean)) require.Equal(t, []byte("test"), o.TypeStringForGauge(Max)) require.Error(t, o.Validate())
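NB: the two hunks that follow move the Types pool into the aggregation package. A minimal usage sketch of the renamed API; the pool size and slice capacity below are illustrative (the real caller sizes the slice to maxTypeID, which is unexported):

package main

import (
	"github.com/m3db/m3metrics/aggregation"
	"github.com/m3db/m3x/pool"
)

func main() {
	p := aggregation.NewTypesPool(pool.NewObjectPoolOptions().SetSize(16))
	// The allocator runs whenever the pool needs a fresh value. Put truncates
	// to length zero, so reused slices come back empty with capacity intact.
	p.Init(func() aggregation.Types {
		return make(aggregation.Types, 0, 32)
	})

	aggTypes := p.Get()
	aggTypes = append(aggTypes, aggregation.Min, aggregation.Max)
	p.Put(aggTypes) // returned to the pool as aggTypes[:0]
}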
diff --git a/policy/aggregation_type_pool.go b/aggregation/types_pool.go similarity index 65% rename from policy/aggregation_type_pool.go rename to aggregation/types_pool.go index 70c15fa..0511d19 100644 --- a/policy/aggregation_type_pool.go +++ b/aggregation/types_pool.go @@ -18,44 +18,44 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package policy +package aggregation import "github.com/m3db/m3x/pool" -// AggregationTypesAlloc allocates new aggregation types. -type AggregationTypesAlloc func() AggregationTypes +// TypesAlloc allocates new aggregation types. +type TypesAlloc func() Types -// AggregationTypesPool provides a pool of aggregation types. -type AggregationTypesPool interface { +// TypesPool provides a pool of aggregation types. +type TypesPool interface { // Init initializes the aggregation types pool. - Init(alloc AggregationTypesAlloc) + Init(alloc TypesAlloc) // Get gets an empty list of aggregation types from the pool. - Get() AggregationTypes + Get() Types // Put returns aggregation types to the pool. - Put(value AggregationTypes) + Put(value Types) } -type aggTypesPool struct { +type typesPool struct { pool pool.ObjectPool } -// NewAggregationTypesPool creates a new pool for aggregation types. -func NewAggregationTypesPool(opts pool.ObjectPoolOptions) AggregationTypesPool { - return &aggTypesPool{pool: pool.NewObjectPool(opts)} +// NewTypesPool creates a new pool for aggregation types. +func NewTypesPool(opts pool.ObjectPoolOptions) TypesPool { + return &typesPool{pool: pool.NewObjectPool(opts)} } -func (p *aggTypesPool) Init(alloc AggregationTypesAlloc) { +func (p *typesPool) Init(alloc TypesAlloc) { p.pool.Init(func() interface{} { return alloc() }) } -func (p *aggTypesPool) Get() AggregationTypes { - return p.pool.Get().(AggregationTypes) +func (p *typesPool) Get() Types { + return p.pool.Get().(Types) } -func (p *aggTypesPool) Put(value AggregationTypes) { +func (p *typesPool) Put(value Types) { p.pool.Put(value[:0]) } diff --git a/policy/aggregation_type_pool_test.go b/aggregation/types_pool_test.go similarity index 82% rename from policy/aggregation_type_pool_test.go rename to aggregation/types_pool_test.go index caf459b..346fcaa 100644 --- a/policy/aggregation_type_pool_test.go +++ b/aggregation/types_pool_test.go @@ -18,30 +18,31 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE.
-package policy +package aggregation import ( "testing" "github.com/m3db/m3x/pool" + "github.com/stretchr/testify/require" ) -func TestAggregationTypesPool(t *testing.T) { - p := NewAggregationTypesPool(pool.NewObjectPoolOptions().SetSize(1)) - p.Init(func() AggregationTypes { - return make(AggregationTypes, 0, MaxAggregationTypeID) +func TestTypesPool(t *testing.T) { + p := NewTypesPool(pool.NewObjectPoolOptions().SetSize(1)) + p.Init(func() Types { + return make(Types, 0, maxTypeID) }) aggTypes := p.Get() - require.Equal(t, MaxAggregationTypeID, cap(aggTypes)) + require.Equal(t, maxTypeID, cap(aggTypes)) require.Equal(t, 0, len(aggTypes)) aggTypes = append(aggTypes, P9999) p.Put(aggTypes) aggTypes2 := p.Get() - require.Equal(t, MaxAggregationTypeID, cap(aggTypes2)) + require.Equal(t, maxTypeID, cap(aggTypes2)) require.Equal(t, 0, len(aggTypes2)) aggTypes2 = append(aggTypes2, Last) diff --git a/glide.lock b/glide.lock index e5f7979..a898bf3 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: a27f9ec2c1c2a746ebc91ca622475a4c1a26c6c5aeda07df834618a92e370337 -updated: 2018-03-09T12:45:29.656901901-05:00 +hash: 917fc7ee9d082037eb97a270f47c84a851c04d431447b88cc4f6d3c10c8068cb +updated: 2018-03-15T10:32:08.892977581-04:00 imports: - name: github.com/apache/thrift version: 9549b25c77587b29be4e0b5c258221a4ed85d37a @@ -15,7 +15,6 @@ imports: version: 13f360950a79f5864a972c786a10a50e44b69541 subpackages: - gomock - - mockgen - name: github.com/golang/protobuf version: 7390af9dcd3c33042ebaf2474a1724a83cf1a7e6 subpackages: @@ -58,7 +57,7 @@ imports: subpackages: - difflib - name: github.com/shirou/gopsutil - version: c432be29ccce470088d07eea25b3ea7e68a8afbb + version: 5776ff9c7c5d063d574ef53d740f75c68b448e53 subpackages: - cpu - host diff --git a/glide.yaml b/glide.yaml index 7c240c8..8df2920 100644 --- a/glide.yaml +++ b/glide.yaml @@ -87,7 +87,7 @@ testImport: - require - package: github.com/golang/mock - version: ^1 + version: ^1.0.0 subpackages: - gomock - mockgen diff --git a/integration/match_rule_update_stress_test.go b/integration/match_rule_update_stress_test.go index 475e216..79ed899 100644 --- a/integration/match_rule_update_stress_test.go +++ b/integration/match_rule_update_stress_test.go @@ -31,6 +31,7 @@ import ( "github.com/m3db/m3cluster/kv" "github.com/m3db/m3cluster/kv/mem" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/filters" "github.com/m3db/m3metrics/generated/proto/schema" "github.com/m3db/m3metrics/matcher" @@ -95,7 +96,7 @@ func TestMatchWithRuleUpdatesStress(t *testing.T) { 1000, false, []policy.Policy{ - policy.NewPolicy(policy.MustParseStoragePolicy("10s:1d"), policy.DefaultAggregationID), + policy.NewPolicy(policy.MustParseStoragePolicy("10s:1d"), aggregation.DefaultID), }, ), }), @@ -122,7 +123,7 @@ func TestMatchWithRuleUpdatesStress(t *testing.T) { 500, false, []policy.Policy{ - policy.NewPolicy(policy.MustParseStoragePolicy("1m:2d"), policy.DefaultAggregationID), + policy.NewPolicy(policy.MustParseStoragePolicy("1m:2d"), aggregation.DefaultID), }, ), }, @@ -144,7 +145,7 @@ func TestMatchWithRuleUpdatesStress(t *testing.T) { 1000, false, []policy.Policy{ - policy.NewPolicy(policy.MustParseStoragePolicy("10s:1d"), policy.DefaultAggregationID), + policy.NewPolicy(policy.MustParseStoragePolicy("10s:1d"), aggregation.DefaultID), }, ), }), @@ -156,7 +157,7 @@ func TestMatchWithRuleUpdatesStress(t *testing.T) { 500, false, []policy.Policy{ - policy.NewPolicy(policy.MustParseStoragePolicy("1m:2d"), policy.DefaultAggregationID), + 
policy.NewPolicy(policy.MustParseStoragePolicy("1m:2d"), aggregation.DefaultID), }, ), }, diff --git a/matcher/cache/list_test.go b/matcher/cache/list_test.go index b76a942..2e6be31 100644 --- a/matcher/cache/list_test.go +++ b/matcher/cache/list_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/policy" "github.com/m3db/m3metrics/rules" xid "github.com/m3db/m3x/ident" @@ -40,16 +41,16 @@ var ( 0, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 0, true, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, ), } @@ -65,16 +66,16 @@ var ( 0, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 0, true, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, ), }, diff --git a/matcher/config.go b/matcher/config.go index 12a18e8..1fde0c5 100644 --- a/matcher/config.go +++ b/matcher/config.go @@ -26,11 +26,11 @@ import ( "github.com/m3db/m3cluster/client" "github.com/m3db/m3cluster/kv" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/filters" "github.com/m3db/m3metrics/matcher/cache" "github.com/m3db/m3metrics/metric/id" "github.com/m3db/m3metrics/metric/id/m3" - "github.com/m3db/m3metrics/policy" "github.com/m3db/m3metrics/rules" "github.com/m3db/m3x/clock" "github.com/m3db/m3x/instrument" @@ -39,16 +39,16 @@ import ( // Configuration is config used to create a Matcher. 
type Configuration struct { - InitWatchTimeout time.Duration `yaml:"initWatchTimeout"` - RulesKVConfig kv.Configuration `yaml:"rulesKVConfig"` - NamespacesKey string `yaml:"namespacesKey" validate:"nonzero"` - RuleSetKeyFmt string `yaml:"ruleSetKeyFmt" validate:"nonzero"` - NamespaceTag string `yaml:"namespaceTag" validate:"nonzero"` - DefaultNamespace string `yaml:"defaultNamespace" validate:"nonzero"` - NameTagKey string `yaml:"nameTagKey" validate:"nonzero"` - MatchRangePast *time.Duration `yaml:"matchRangePast"` - SortedTagIteratorPool pool.ObjectPoolConfiguration `yaml:"sortedTagIteratorPool"` - AggregationTypes policy.AggregationTypesConfiguration `yaml:"aggregationTypes"` + InitWatchTimeout time.Duration `yaml:"initWatchTimeout"` + RulesKVConfig kv.Configuration `yaml:"rulesKVConfig"` + NamespacesKey string `yaml:"namespacesKey" validate:"nonzero"` + RuleSetKeyFmt string `yaml:"ruleSetKeyFmt" validate:"nonzero"` + NamespaceTag string `yaml:"namespaceTag" validate:"nonzero"` + DefaultNamespace string `yaml:"defaultNamespace" validate:"nonzero"` + NameTagKey string `yaml:"nameTagKey" validate:"nonzero"` + MatchRangePast *time.Duration `yaml:"matchRangePast"` + SortedTagIteratorPool pool.ObjectPoolConfiguration `yaml:"sortedTagIteratorPool"` + AggregationTypes aggregation.TypesConfiguration `yaml:"aggregationTypes"` } // NewNamespaces creates a matcher.Namespaces. diff --git a/matcher/namespaces.go b/matcher/namespaces.go index 5caf670..e91f4df 100644 --- a/matcher/namespaces.go +++ b/matcher/namespaces.go @@ -27,9 +27,9 @@ import ( "github.com/m3db/m3cluster/kv" "github.com/m3db/m3cluster/kv/util/runtime" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/generated/proto/schema" "github.com/m3db/m3metrics/metric" - "github.com/m3db/m3metrics/policy" "github.com/m3db/m3metrics/rules" "github.com/m3db/m3x/clock" xid "github.com/m3db/m3x/ident" @@ -58,7 +58,7 @@ type Namespaces interface { // ReverseMatch reverse matches the matching policies for a given id in a given namespace // between [fromNanos, toNanos), taking into account the metric type and aggregation type for the given id. - ReverseMatch(namespace, id []byte, fromNanos, toNanos int64, mt metric.Type, at policy.AggregationType) rules.MatchResult + ReverseMatch(namespace, id []byte, fromNanos, toNanos int64, mt metric.Type, at aggregation.Type) rules.MatchResult // Close closes the namespaces. 
Close() @@ -177,7 +177,12 @@ func (n *namespaces) ForwardMatch(namespace, id []byte, fromNanos, toNanos int64 return ruleSet.ForwardMatch(id, fromNanos, toNanos) } -func (n *namespaces) ReverseMatch(namespace, id []byte, fromNanos, toNanos int64, mt metric.Type, at policy.AggregationType) rules.MatchResult { +func (n *namespaces) ReverseMatch( + namespace, id []byte, + fromNanos, toNanos int64, + mt metric.Type, + at aggregation.Type, +) rules.MatchResult { ruleSet, exists := n.ruleSet(namespace) if !exists { return rules.EmptyMatchResult diff --git a/matcher/ruleset.go b/matcher/ruleset.go index 3a6ac09..d2dd324 100644 --- a/matcher/ruleset.go +++ b/matcher/ruleset.go @@ -26,9 +26,9 @@ import ( "github.com/m3db/m3cluster/kv" "github.com/m3db/m3cluster/kv/util/runtime" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/generated/proto/schema" "github.com/m3db/m3metrics/metric" - "github.com/m3db/m3metrics/policy" "github.com/m3db/m3metrics/rules" "github.com/m3db/m3x/clock" "github.com/m3db/m3x/instrument" @@ -162,7 +162,12 @@ func (r *ruleSet) ForwardMatch(id []byte, fromNanos, toNanos int64) rules.MatchR return res } -func (r *ruleSet) ReverseMatch(id []byte, fromNanos, toNanos int64, mt metric.Type, at policy.AggregationType) rules.MatchResult { +func (r *ruleSet) ReverseMatch( + id []byte, + fromNanos, toNanos int64, + mt metric.Type, + at aggregation.Type, +) rules.MatchResult { callStart := r.nowFn() r.RLock() if r.matcher == nil { diff --git a/matcher/ruleset_test.go b/matcher/ruleset_test.go index ae1cb45..6fadf8e 100644 --- a/matcher/ruleset_test.go +++ b/matcher/ruleset_test.go @@ -27,10 +27,10 @@ import ( "github.com/m3db/m3cluster/kv" "github.com/m3db/m3cluster/kv/mem" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/generated/proto/schema" "github.com/m3db/m3metrics/matcher/cache" "github.com/m3db/m3metrics/metric" - "github.com/m3db/m3metrics/policy" "github.com/m3db/m3metrics/rules" "github.com/stretchr/testify/require" @@ -91,12 +91,12 @@ func TestRuleSetReverseMatchWithMatcher(t *testing.T) { toNanos = now.Add(time.Second).UnixNano() ) - require.Equal(t, mockMatcher.res, rs.ReverseMatch([]byte("foo"), fromNanos, toNanos, metric.CounterType, policy.Sum)) + require.Equal(t, mockMatcher.res, rs.ReverseMatch([]byte("foo"), fromNanos, toNanos, metric.CounterType, aggregation.Sum)) require.Equal(t, []byte("foo"), mockMatcher.id) require.Equal(t, fromNanos, mockMatcher.fromNanos) require.Equal(t, toNanos, mockMatcher.toNanos) require.Equal(t, metric.CounterType, mockMatcher.metricType) - require.Equal(t, policy.Sum, mockMatcher.aggregationType) + require.Equal(t, aggregation.Sum, mockMatcher.aggregationType) } func TestToRuleSetNilValue(t *testing.T) { @@ -221,7 +221,7 @@ type mockMatcher struct { toNanos int64 res rules.MatchResult metricType metric.Type - aggregationType policy.AggregationType + aggregationType aggregation.Type } func (mm *mockMatcher) ForwardMatch( @@ -237,7 +237,8 @@ func (mm *mockMatcher) ForwardMatch( func (mm *mockMatcher) ReverseMatch( id []byte, fromNanos, toNanos int64, - mt metric.Type, at policy.AggregationType, + mt metric.Type, + at aggregation.Type, ) rules.MatchResult { mm.id = id mm.fromNanos = fromNanos diff --git a/policy/aggregation_type.go b/policy/aggregation_type.go deleted file mode 100644 index 9c28971..0000000 --- a/policy/aggregation_type.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package policy - -import ( - "fmt" - "strings" - - "github.com/m3db/m3metrics/generated/proto/schema" - "github.com/m3db/m3x/pool" -) - -// Supported aggregation types. -const ( - UnknownAggregationType AggregationType = iota - Last - Min - Max - Mean - Median - Count - Sum - SumSq - Stdev - P10 - P20 - P30 - P40 - P50 - P60 - P70 - P80 - P90 - P95 - P99 - P999 - P9999 - - nextAggregationTypeID = iota -) - -const ( - // MaxAggregationTypeID is the largest id of all the valid aggregation types. - // NB(cw) MaxAggregationTypeID is guaranteed to be greater or equal - // to len(ValidAggregationTypes). - // Iff ids of all the valid aggregation types are consecutive, - // MaxAggregationTypeID == len(ValidAggregationTypes). - MaxAggregationTypeID = nextAggregationTypeID - 1 - - // AggregationIDLen is the length of the AggregationID. - // The AggregationIDLen will be 1 when MaxAggregationTypeID <= 63. - AggregationIDLen = (MaxAggregationTypeID)/64 + 1 - - aggregationTypesSeparator = "," - - // aggregation id uses an array of int64 to represent aggregation types. - aggIDBitShift = 6 - aggIDBitMask = 63 -) - -var ( - emptyStruct struct{} - - // DefaultAggregationTypes is a default list of aggregation types. - DefaultAggregationTypes AggregationTypes - - // DefaultAggregationID is a default AggregationID. - DefaultAggregationID AggregationID - - // ValidAggregationTypes is the list of all the valid aggregation types. - ValidAggregationTypes = map[AggregationType]struct{}{ - Last: emptyStruct, - Min: emptyStruct, - Max: emptyStruct, - Mean: emptyStruct, - Median: emptyStruct, - Count: emptyStruct, - Sum: emptyStruct, - SumSq: emptyStruct, - Stdev: emptyStruct, - P10: emptyStruct, - P20: emptyStruct, - P30: emptyStruct, - P40: emptyStruct, - P50: emptyStruct, - P60: emptyStruct, - P70: emptyStruct, - P80: emptyStruct, - P90: emptyStruct, - P95: emptyStruct, - P99: emptyStruct, - P999: emptyStruct, - P9999: emptyStruct, - } - - aggregationTypeStringMap map[string]AggregationType -) - -func init() { - aggregationTypeStringMap = make(map[string]AggregationType, MaxAggregationTypeID) - for aggType := range ValidAggregationTypes { - aggregationTypeStringMap[aggType.String()] = aggType - } -} - -// AggregationType defines a custom aggregation function. -type AggregationType int - -// NewAggregationTypeFromSchema creates an aggregation type from a schema. 
-func NewAggregationTypeFromSchema(input schema.AggregationType) (AggregationType, error) { - aggType := AggregationType(input) - if !aggType.IsValid() { - return UnknownAggregationType, fmt.Errorf("invalid aggregation type from schema: %s", input) - } - return aggType, nil -} - -// ID returns the id of the AggregationType. -func (a AggregationType) ID() int { - return int(a) -} - -// IsValid checks if an AggregationType is valid. -func (a AggregationType) IsValid() bool { - _, ok := ValidAggregationTypes[a] - return ok -} - -// IsValidForGauge if an AggregationType is valid for Gauge. -func (a AggregationType) IsValidForGauge() bool { - switch a { - case Last, Min, Max, Mean, Count, Sum, SumSq, Stdev: - return true - default: - return false - } -} - -// IsValidForCounter if an AggregationType is valid for Counter. -func (a AggregationType) IsValidForCounter() bool { - switch a { - case Min, Max, Mean, Count, Sum, SumSq, Stdev: - return true - default: - return false - } -} - -// IsValidForTimer if an AggregationType is valid for Timer. -func (a AggregationType) IsValidForTimer() bool { - switch a { - case Last: - return false - default: - return true - } -} - -// Quantile returns the quantile represented by the AggregationType. -func (a AggregationType) Quantile() (float64, bool) { - switch a { - case P10: - return 0.1, true - case P20: - return 0.2, true - case P30: - return 0.3, true - case P40: - return 0.4, true - case P50, Median: - return 0.5, true - case P60: - return 0.6, true - case P70: - return 0.7, true - case P80: - return 0.8, true - case P90: - return 0.9, true - case P95: - return 0.95, true - case P99: - return 0.99, true - case P999: - return 0.999, true - case P9999: - return 0.9999, true - default: - return 0, false - } -} - -// Schema returns the schema of the aggregation type. -func (a AggregationType) Schema() (schema.AggregationType, error) { - s := schema.AggregationType(a) - if err := validateSchemaAggregationType(s); err != nil { - return schema.AggregationType_UNKNOWN, err - } - return s, nil -} - -// UnmarshalYAML unmarshals aggregation type from a string. -func (a *AggregationType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var str string - if err := unmarshal(&str); err != nil { - return err - } - - parsed, err := ParseAggregationType(str) - if err != nil { - return err - } - *a = parsed - return nil -} - -func validateSchemaAggregationType(a schema.AggregationType) error { - _, ok := schema.AggregationType_name[int32(a)] - if !ok { - return fmt.Errorf("invalid schema aggregation type: %v", a) - } - return nil -} - -// ParseAggregationType parses an aggregation type. -func ParseAggregationType(str string) (AggregationType, error) { - aggType, ok := aggregationTypeStringMap[str] - if !ok { - return UnknownAggregationType, fmt.Errorf("invalid aggregation type: %s", str) - } - return aggType, nil -} - -// AggregationTypes is a list of AggregationTypes. -type AggregationTypes []AggregationType - -// NewAggregationTypesFromSchema creates a list of aggregation types from a schema. -func NewAggregationTypesFromSchema(input []schema.AggregationType) (AggregationTypes, error) { - res := make([]AggregationType, len(input)) - for i, t := range input { - aggType, err := NewAggregationTypeFromSchema(t) - if err != nil { - return DefaultAggregationTypes, err - } - res[i] = aggType - } - return res, nil -} - -// UnmarshalYAML unmarshals aggregation types from a string. 
-func (aggTypes *AggregationTypes) UnmarshalYAML(unmarshal func(interface{}) error) error { - var str string - if err := unmarshal(&str); err != nil { - return err - } - - parsed, err := ParseAggregationTypes(str) - if err != nil { - return err - } - *aggTypes = parsed - return nil -} - -// Contains checks if the given type is contained in the aggregation types. -func (aggTypes AggregationTypes) Contains(aggType AggregationType) bool { - for _, at := range aggTypes { - if at == aggType { - return true - } - } - return false -} - -// IsDefault checks if the AggregationTypes is the default aggregation type. -func (aggTypes AggregationTypes) IsDefault() bool { - return len(aggTypes) == 0 -} - -// String is for debugging. -func (aggTypes AggregationTypes) String() string { - if len(aggTypes) == 0 { - return "" - } - - parts := make([]string, len(aggTypes)) - for i, aggType := range aggTypes { - parts[i] = aggType.String() - } - return strings.Join(parts, aggregationTypesSeparator) -} - -// IsValidForGauge checks if the list of aggregation types is valid for Gauge. -func (aggTypes AggregationTypes) IsValidForGauge() bool { - for _, aggType := range aggTypes { - if !aggType.IsValidForGauge() { - return false - } - } - return true -} - -// IsValidForCounter checks if the list of aggregation types is valid for Counter. -func (aggTypes AggregationTypes) IsValidForCounter() bool { - for _, aggType := range aggTypes { - if !aggType.IsValidForCounter() { - return false - } - } - return true -} - -// IsValidForTimer checks if the list of aggregation types is valid for Timer. -func (aggTypes AggregationTypes) IsValidForTimer() bool { - for _, aggType := range aggTypes { - if !aggType.IsValidForTimer() { - return false - } - } - return true -} - -// PooledQuantiles returns all the quantiles found in the list -// of aggregation types. Using a floats pool if available. -// -// A boolean will also be returned to indicate whether the -// returned float slice is from the pool. -func (aggTypes AggregationTypes) PooledQuantiles(p pool.FloatsPool) ([]float64, bool) { - var ( - res []float64 - initialized bool - medianAdded bool - pooled bool - ) - for _, aggType := range aggTypes { - q, ok := aggType.Quantile() - if !ok { - continue - } - // Dedup P50 and Median. - if aggType == P50 || aggType == Median { - if medianAdded { - continue - } - medianAdded = true - } - if !initialized { - if p == nil { - res = make([]float64, 0, len(aggTypes)) - } else { - res = p.Get(len(aggTypes)) - pooled = true - } - initialized = true - } - res = append(res, q) - } - return res, pooled -} - -// Schema returns the schema of the aggregation types. -func (aggTypes AggregationTypes) Schema() ([]schema.AggregationType, error) { - // This is the same as returning an empty slice from the functionality perspective. - // It makes creating testing fixtures much simpler. - if aggTypes == nil { - return nil, nil - } - - res := make([]schema.AggregationType, len(aggTypes)) - for i, aggType := range aggTypes { - s, err := aggType.Schema() - if err != nil { - return nil, err - } - res[i] = s - } - - return res, nil -} - -// ParseAggregationTypes parses a list of aggregation types in the form of type1,type2,type3. 
-func ParseAggregationTypes(str string) (AggregationTypes, error) { - parts := strings.Split(str, aggregationTypesSeparator) - res := make(AggregationTypes, len(parts)) - for i := range parts { - aggType, err := ParseAggregationType(parts[i]) - if err != nil { - return nil, err - } - res[i] = aggType - } - return res, nil -} - -// AggregationID represents a compressed view of AggregationTypes. -type AggregationID [AggregationIDLen]uint64 - -// NewAggregationIDFromSchema creates an AggregationID from schema. -func NewAggregationIDFromSchema(input []schema.AggregationType) (AggregationID, error) { - aggTypes, err := NewAggregationTypesFromSchema(input) - if err != nil { - return DefaultAggregationID, err - } - - // TODO(cw): consider pooling these compressors, - // this allocates one extra slice of length one per call. - id, err := NewAggregationIDCompressor().Compress(aggTypes) - if err != nil { - return DefaultAggregationID, err - } - return id, nil -} - -// MustCompressAggregationTypes compresses a list of aggregation types to -// an AggregationID, it panics if an error was encountered. -func MustCompressAggregationTypes(aggTypes ...AggregationType) AggregationID { - res, err := NewAggregationIDCompressor().Compress(aggTypes) - if err != nil { - panic(err.Error()) - } - return res -} - -// IsDefault checks if the AggregationID is the default aggregation type. -func (id AggregationID) IsDefault() bool { - return id == DefaultAggregationID -} - -// Contains checks if the given aggregation type is contained in the aggregation id. -func (id AggregationID) Contains(aggType AggregationType) bool { - if !aggType.IsValid() { - return false - } - idx := int(aggType) >> aggIDBitShift // aggType / 64 - offset := uint(aggType) & aggIDBitMask // aggType % 64 - return (id[idx] & (1 << offset)) > 0 -} - -// AggregationTypes returns the aggregation types defined by the id. -func (id AggregationID) AggregationTypes() (AggregationTypes, error) { - return NewAggregationIDDecompressor().Decompress(id) -} - -// String for debugging. -func (id AggregationID) String() string { - aggTypes, err := id.AggregationTypes() - if err != nil { - return fmt.Sprintf("[invalid AggregationID: %v]", err) - } - return aggTypes.String() -} diff --git a/policy/aggregationtype_string.go b/policy/aggregationtype_string.go deleted file mode 100644 index 18bdb8b..0000000 --- a/policy/aggregationtype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type AggregationType"; DO NOT EDIT - -package policy - -import "fmt" - -const _AggregationType_name = "UnknownLastMinMaxMeanMedianCountSumSumSqStdevP10P20P30P40P50P60P70P80P90P95P99P999P9999" - -var _AggregationType_index = [...]uint8{0, 7, 11, 14, 17, 21, 27, 32, 35, 40, 45, 48, 51, 54, 57, 60, 63, 66, 69, 72, 75, 78, 82, 87} - -func (i AggregationType) String() string { - if i < 0 || i >= AggregationType(len(_AggregationType_index)-1) { - return fmt.Sprintf("AggregationType(%d)", i) - } - return _AggregationType_name[_AggregationType_index[i]:_AggregationType_index[i+1]] -} diff --git a/policy/policy.go b/policy/policy.go index e63032e..5112e63 100644 --- a/policy/policy.go +++ b/policy/policy.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/generated/proto/schema" ) @@ -43,12 +44,12 @@ var ( // Policy contains a storage policy and a list of custom aggregation types. type Policy struct { StoragePolicy - AggregationID + AggregationID aggregation.ID } // NewPolicy creates a policy. 
-func NewPolicy(sp StoragePolicy, aggTypes AggregationID) Policy { - return Policy{StoragePolicy: sp, AggregationID: aggTypes} +func NewPolicy(sp StoragePolicy, aggID aggregation.ID) Policy { + return Policy{StoragePolicy: sp, AggregationID: aggID} } // NewPolicyFromSchema creates a new policy from a schema policy. @@ -62,7 +63,7 @@ func NewPolicyFromSchema(p *schema.Policy) (Policy, error) { return DefaultPolicy, err } - aggID, err := NewAggregationIDFromSchema(p.AggregationTypes) + aggID, err := aggregation.NewIDFromSchema(p.AggregationTypes) if err != nil { return DefaultPolicy, err } @@ -78,7 +79,7 @@ func (p Policy) Schema() (*schema.Policy, error) { return nil, err } - aggTypes, err := NewAggregationIDDecompressor().Decompress(p.AggregationID) + aggTypes, err := aggregation.NewIDDecompressor().Decompress(p.AggregationID) if err != nil { return nil, err } @@ -146,25 +147,25 @@ func ParsePolicy(str string) (Policy, error) { return DefaultPolicy, errInvalidPolicyString } - p, err := ParseStoragePolicy(parts[0]) + sp, err := ParseStoragePolicy(parts[0]) if err != nil { return DefaultPolicy, err } - var id = DefaultAggregationID + var aggID = aggregation.DefaultID if l == 2 { - aggTypes, err := ParseAggregationTypes(parts[1]) + aggTypes, err := aggregation.ParseTypes(parts[1]) if err != nil { return DefaultPolicy, err } - id, err = NewAggregationIDCompressor().Compress(aggTypes) + aggID, err = aggregation.NewIDCompressor().Compress(aggTypes) if err != nil { return DefaultPolicy, err } } - return NewPolicy(p, id), nil + return NewPolicy(sp, aggID), nil } // NewPoliciesFromSchema creates multiple new policies from given schema policies. @@ -217,7 +218,7 @@ func (pr ByResolutionAscRetentionDesc) Less(i, j int) bool { return false } at1, at2 := p1.AggregationID, p2.AggregationID - for k := 0; k < AggregationIDLen; k++ { + for k := 0; k < aggregation.IDLen; k++ { if at1[k] < at2[k] { return true }
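NB: ParsePolicy above accepts an optional aggregation suffix after the storage policy, separated by "|", and compresses it into an aggregation.ID. A minimal round-trip sketch under the renamed packages:

package main

import (
	"fmt"

	"github.com/m3db/m3metrics/policy"
)

func main() {
	// "1m:12h" is the storage policy (resolution:retention); "Mean,P999"
	// lists the aggregation types applied at that resolution.
	p, err := policy.ParsePolicy("1m:12h|Mean,P999")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.String()) // "1m:12h|Mean,P999", per the tests below
}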
diff --git a/policy/policy_benchmark_test.go b/policy/policy_benchmark_test.go index 1b3ecab..fe8e15c 100644 --- a/policy/policy_benchmark_test.go +++ b/policy/policy_benchmark_test.go @@ -24,14 +24,15 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" xtime "github.com/m3db/m3x/time" ) var ( testNowNanos = time.Now().UnixNano() testPolicies = []Policy{ - NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 30*24*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), aggregation.DefaultID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 30*24*time.Hour), aggregation.DefaultID), } ) diff --git a/policy/policy_test.go b/policy/policy_test.go index 7a75036..554b4e9 100644 --- a/policy/policy_test.go +++ b/policy/policy_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/generated/proto/schema" xtime "github.com/m3db/m3x/time" @@ -37,9 +38,9 @@ func TestPolicyString(t *testing.T) { p Policy expected string }{ - {p: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), DefaultAggregationID), expected: "10s:1h"}, - {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), MustCompressAggregationTypes(Mean, P999)), expected: "1m:12h|Mean,P999"}, - {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), MustCompressAggregationTypes(Mean)), expected: "1m:12h|Mean"}, + {p: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), aggregation.DefaultID), expected: "10s:1h"}, + {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.MustCompressTypes(aggregation.Mean, aggregation.P999)), expected: "1m:12h|Mean,P999"}, + {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.MustCompressTypes(aggregation.Mean)), expected: "1m:12h|Mean"}, } for _, input := range inputs { require.Equal(t, input.expected, input.p.String()) @@ -53,23 +54,23 @@ func TestPolicyUnmarshalYAML(t *testing.T) { }{ { str: "1s:1h", - expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), DefaultAggregationID), + expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, { str: "10s:1d|Mean", - expected: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), MustCompressAggregationTypes(Mean)), + expected: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.MustCompressTypes(aggregation.Mean)), }, { str: "60s:24h|Mean,Count", - expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), MustCompressAggregationTypes(Mean, Count)), + expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.MustCompressTypes(aggregation.Mean, aggregation.Count)), }, { str: "1m:1d|Count,Mean", - expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), MustCompressAggregationTypes(Mean, Count)), + expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.MustCompressTypes(aggregation.Mean, aggregation.Count)), }, { str: "1s@1s:1h|P999,P9999", - expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), MustCompressAggregationTypes(P999, P9999)), + expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.MustCompressTypes(aggregation.P999, aggregation.P9999)), }, } for _, input := range inputs { @@ -133,8 +134,8 @@ func TestNewPoliciesFromSchema(t *testing.T) { res, err := NewPoliciesFromSchema(input) require.NoError(t, err) require.Equal(t, []Policy{ - NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), MustCompressAggregationTypes(Mean, P999)), - NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 240*time.Hour), MustCompressAggregationTypes(Mean, P9999)), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.MustCompressTypes(aggregation.Mean, aggregation.P999)), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 240*time.Hour), aggregation.MustCompressTypes(aggregation.Mean, aggregation.P9999)), }, res) } @@ -231,15 +232,15 @@ func TestParsePolicyIntoSchema(t *testing.T) { func TestPoliciesByResolutionAsc(t *testing.T) { inputs := []Policy{ - NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID), - NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), AggregationID{100}), - NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute,
48*time.Hour), DefaultAggregationID),
-		NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), AggregationID{100}),
+		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.ID{100}),
+		NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.ID{100}),
 	}
 	expected := []Policy{inputs[2], inputs[0], inputs[1], inputs[5], inputs[4], inputs[3], inputs[7], inputs[6], inputs[8]}
 	sort.Sort(ByResolutionAscRetentionDesc(inputs))
diff --git a/policy/staged_policy_test.go b/policy/staged_policy_test.go
index 783ddfb..227d610 100644
--- a/policy/staged_policy_test.go
+++ b/policy/staged_policy_test.go
@@ -25,7 +25,9 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	xtime "github.com/m3db/m3x/time"
+
 	"github.com/stretchr/testify/require"
 )
@@ -38,8 +40,8 @@ func TestStagedPoliciesHasDefaultPolicies(t *testing.T) {
 
 func TestStagedPoliciesHasCustomPolicies(t *testing.T) {
 	policies := []Policy{
-		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
+		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+		NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
 	}
 	sp := NewStagedPolicies(testNowNanos, false, policies)
 	require.Equal(t, testNowNanos, sp.CutoverNanos)
@@ -63,10 +65,10 @@ func TestStagedPoliciesEquals(t *testing.T) {
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Min, Max)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Min, aggregation.Max)),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Max, Min)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Max, aggregation.Min)),
 				}),
 			},
 			expected: true,
@@ -74,12 +76,12 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 			},
 			expected: true,
@@ -101,10 +103,10 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Max)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Max)),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Last)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Last)),
 				}),
 			},
 			expected: false,
@@ -112,10 +114,10 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Max)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Max)),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Max, Min)),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Max, aggregation.Min)),
 				}),
 			},
 			expected: false,
@@ -124,8 +126,8 @@
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, nil),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 			},
 			expected: false,
@@ -133,13 +135,13 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(1000, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
 				}),
 			},
 			expected: false,
@@ -147,13 +149,13 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 			},
 			expected: false,
@@ -161,12 +163,12 @@
 		{
 			sp: [2]StagedPolicies{
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 				}),
 				NewStagedPolicies(0, false, []Policy{
-					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID),
+					NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
 				}),
 			},
 			expected: false,
@@ -200,8 +202,8 @@ func TestStagedPoliciesIsEmpty(t *testing.T) {
 		},
 		{
 			sp: NewStagedPolicies(0, true, []Policy{
-				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 			}),
 			expected: false,
 		},
@@ -226,8 +228,8 @@ func TestPoliciesListIsDefault(t *testing.T) {
 		},
 		{
 			pl: []StagedPolicies{NewStagedPolicies(0, true, []Policy{
-				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID),
+				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), aggregation.DefaultID),
 			})},
 			expected: false,
 		},
@@ -260,15 +262,15 @@ func TestPoliciesListJSONMarshaling(t *testing.T) {
 			0,
 			false,
 			[]Policy{
-				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID),
-				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), MustCompressAggregationTypes(Count, Mean)),
+				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+				NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.MustCompressTypes(aggregation.Count, aggregation.Mean)),
 			},
 		),
 		NewStagedPolicies(
 			100,
 			true,
 			[]Policy{
-				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), MustCompressAggregationTypes(Sum)),
+				NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.MustCompressTypes(aggregation.Sum)),
 			},
 		),
 		NewStagedPolicies(
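The Equals cases above depend on compression being order-insensitive: an aggregation ID is a bitset, so compressing {Min, Max} and {Max, Min} must produce identical IDs, which is exactly what the expected-true case asserts. A minimal property sketch of that behavior as a standalone test (illustrative only, not part of this change):

	package policy

	import (
		"testing"

		"github.com/m3db/m3metrics/aggregation"
		"github.com/stretchr/testify/require"
	)

	func TestCompressTypesOrderInsensitive(t *testing.T) {
		// The same set of types, in any order, sets the same bits.
		a := aggregation.MustCompressTypes(aggregation.Min, aggregation.Max)
		b := aggregation.MustCompressTypes(aggregation.Max, aggregation.Min)
		require.Equal(t, a, b)

		// A different set sets different bits.
		require.NotEqual(t, a, aggregation.MustCompressTypes(aggregation.Max))
	}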
diff --git a/protocol/msgpack/base_encoder.go b/protocol/msgpack/base_encoder.go
index e73069e..863fcfa 100644
--- a/protocol/msgpack/base_encoder.go
+++ b/protocol/msgpack/base_encoder.go
@@ -21,6 +21,7 @@
 package msgpack
 
 import (
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/metric/id"
 	"github.com/m3db/m3metrics/policy"
 )
@@ -97,14 +98,14 @@ func (enc *baseEncoder) encodePolicyInternal(p policy.Policy) {
 	enc.encodeCompressedAggregationTypes(p.AggregationID)
 }
 
-func (enc *baseEncoder) encodeCompressedAggregationTypes(aggTypes policy.AggregationID) {
+func (enc *baseEncoder) encodeCompressedAggregationTypes(aggTypes aggregation.ID) {
 	if aggTypes.IsDefault() {
 		enc.encodeNumObjectFields(numFieldsForType(defaultAggregationID))
 		enc.encodeObjectType(defaultAggregationID)
 		return
 	}
 
-	if policy.AggregationIDLen == 1 {
+	if aggregation.IDLen == 1 {
 		enc.encodeNumObjectFields(numFieldsForType(shortAggregationID))
 		enc.encodeObjectType(shortAggregationID)
 		enc.encodeVarintFn(int64(aggTypes[0]))
@@ -114,7 +115,7 @@ func (enc *baseEncoder) encodeCompressedAggregationTypes(aggTypes policy.Aggrega
 	// NB(cw): Only reachable after we start to support more than 63 aggregation types
 	enc.encodeNumObjectFields(numFieldsForType(longAggregationID))
 	enc.encodeObjectType(longAggregationID)
-	enc.encodeArrayLen(policy.AggregationIDLen)
+	enc.encodeArrayLen(aggregation.IDLen)
 	for _, v := range aggTypes {
 		enc.encodeVarint(int64(v))
 	}
 }
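encodeCompressedAggregationTypes above chooses among three wire forms: a bare marker for the default ID, a single varint while aggregation.IDLen is 1, and a length-prefixed varint array once more than 63 aggregation types force the ID to grow. A hedged sketch of just that branch logic (wireFormFor is an illustrative helper, not part of the msgpack encoder's API):

	package main

	import (
		"fmt"

		"github.com/m3db/m3metrics/aggregation"
	)

	// wireFormFor mirrors the branch order of encodeCompressedAggregationTypes:
	// default IDs carry no payload, single-word IDs carry one varint (id[0]),
	// and multi-word IDs carry an array of IDLen varints.
	func wireFormFor(id aggregation.ID) string {
		switch {
		case id.IsDefault():
			return "default" // marker only
		case aggregation.IDLen == 1:
			return "short" // one varint
		default:
			return "long" // IDLen varints
		}
	}

	func main() {
		fmt.Println(wireFormFor(aggregation.DefaultID))                          // default
		fmt.Println(wireFormFor(aggregation.MustCompressTypes(aggregation.Max))) // short
	}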
+ "github.com/stretchr/testify/require" ) func TestAggregationTypesRoundTrip(t *testing.T) { - inputs := []policy.AggregationID{ - policy.DefaultAggregationID, - policy.AggregationID{5}, - policy.AggregationID{100}, - policy.AggregationID{12345}, + inputs := []aggregation.ID{ + aggregation.DefaultID, + aggregation.ID{5}, + aggregation.ID{100}, + aggregation.ID{12345}, } for _, input := range inputs { @@ -29,9 +31,9 @@ func TestAggregationTypesRoundTrip(t *testing.T) { func TestUnaggregatedPolicyRoundTrip(t *testing.T) { inputs := []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.AggregationID{8}), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.AggregationID{100}), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), aggregation.ID{8}), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.ID{100}), } for _, input := range inputs { diff --git a/protocol/msgpack/unaggregated_encoder_test.go b/protocol/msgpack/unaggregated_encoder_test.go index 6c896f9..291ef43 100644 --- a/protocol/msgpack/unaggregated_encoder_test.go +++ b/protocol/msgpack/unaggregated_encoder_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/metric/unaggregated" "github.com/m3db/m3metrics/policy" xtime "github.com/m3db/m3x/time" @@ -190,7 +191,7 @@ func TestUnaggregatedEncodeArrayLenError(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, ), } @@ -235,7 +236,7 @@ func expectedResultsForUnaggregatedPolicy(p policy.Policy) []interface{} { return append(results, expectedResultsForCompressedAggregationTypes(p.AggregationID)...) 
} -func expectedResultsForCompressedAggregationTypes(compressed policy.AggregationID) []interface{} { +func expectedResultsForCompressedAggregationTypes(compressed aggregation.ID) []interface{} { results := []interface{}{} if compressed.IsDefault() { diff --git a/protocol/msgpack/unaggregated_iterator_test.go b/protocol/msgpack/unaggregated_iterator_test.go index 5364aee..5bb082a 100644 --- a/protocol/msgpack/unaggregated_iterator_test.go +++ b/protocol/msgpack/unaggregated_iterator_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/metric/id" "github.com/m3db/m3metrics/metric/unaggregated" "github.com/m3db/m3metrics/policy" @@ -452,7 +453,7 @@ func TestUnaggregatedIteratorDecodePolicyWithCustomResolution(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(3*time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(3*time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, ), }, @@ -474,7 +475,7 @@ func TestUnaggregatedIteratorDecodePolicyWithCustomRetention(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 289*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 289*time.Hour), aggregation.DefaultID), }, ), }, @@ -496,7 +497,7 @@ func TestUnaggregatedIteratorDecodePolicyMoreFieldsThanExpected(t *testing.T) { time.Now().UnixNano(), true, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID), }, ), }, diff --git a/protocol/msgpack/unaggregated_roundtrip_test.go b/protocol/msgpack/unaggregated_roundtrip_test.go index 0ba981e..f08aeb3 100644 --- a/protocol/msgpack/unaggregated_roundtrip_test.go +++ b/protocol/msgpack/unaggregated_roundtrip_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/aggregation" "github.com/m3db/m3metrics/metric/unaggregated" "github.com/m3db/m3metrics/policy" xtime "github.com/m3db/m3x/time" @@ -61,9 +62,9 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID), }, ), } @@ -73,16 +74,16 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 
diff --git a/protocol/msgpack/unaggregated_roundtrip_test.go b/protocol/msgpack/unaggregated_roundtrip_test.go
index 0ba981e..f08aeb3 100644
--- a/protocol/msgpack/unaggregated_roundtrip_test.go
+++ b/protocol/msgpack/unaggregated_roundtrip_test.go
@@ -28,6 +28,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/metric/unaggregated"
 	"github.com/m3db/m3metrics/policy"
 	xtime "github.com/m3db/m3x/time"
@@ -61,9 +62,9 @@
 			time.Now().UnixNano(),
 			false,
 			[]policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID),
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID),
 			},
 		),
 	}
@@ -73,16 +74,16 @@
 			time.Now().UnixNano(),
 			false,
 			[]policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID),
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID),
 			},
 		),
 		policy.NewStagedPolicies(
 			time.Now().Add(time.Minute).UnixNano(),
 			true,
 			[]policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID),
 			},
 		),
 	}
@@ -92,7 +93,7 @@
 			time.Now().UnixNano(),
 			true,
 			[]policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Unit(100), time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Unit(100), time.Hour), aggregation.DefaultID),
 			},
 		),
 	}
@@ -122,9 +123,9 @@
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.AggregationID{8}),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.ID{8}),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -137,7 +138,7 @@
 				time.Now().UnixNano(),
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -150,7 +151,7 @@
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.AggregationID{100}),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), aggregation.ID{100}),
 				},
 			),
 		},
@@ -165,16 +166,16 @@
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				time.Now().Add(time.Minute).UnixNano(),
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -186,14 +187,14 @@
 				time.Now().UnixNano(),
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				time.Now().Add(time.Hour).UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -205,14 +206,14 @@
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				time.Now().Add(time.Nanosecond).UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 36*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 36*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -297,8 +298,8 @@ func TestUnaggregatedEncodeDecodeMetricWithPoliciesListStress(t *testing.T) {
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -307,16 +308,16 @@ func TestUnaggregatedEncodeDecodeMetricWithPoliciesListStress(t *testing.T) {
 				time.Now().UnixNano(),
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				time.Now().Add(time.Minute).UnixNano(),
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
diff --git a/rules/mapping_test.go b/rules/mapping_test.go
index 8a3719a..688555d 100644
--- a/rules/mapping_test.go
+++ b/rules/mapping_test.go
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/errors"
 	"github.com/m3db/m3metrics/generated/proto/schema"
 	"github.com/m3db/m3metrics/policy"
@@ -99,7 +100,7 @@ var (
 func TestNewMappingRuleSnapshotFromSchema(t *testing.T) {
 	res, err := newMappingRuleSnapshot(testMappingRuleSchema.Snapshots[0], testTagsFilterOptions())
 	expectedPolicies := []policy.Policy{
-		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.MustCompressAggregationTypes(policy.P999)),
+		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.MustCompressTypes(aggregation.P999)),
 	}
 	require.NoError(t, err)
 	require.Equal(t, "foo", res.name)
@@ -138,8 +139,8 @@ func TestNewMappingRuleSnapshotFromFields(t *testing.T) {
 		cutoverNanos = int64(12345)
 		rawFilter    = "tagName1:tagValue1 tagName2:tagValue2"
 		policies     = []policy.Policy{
-			policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-			policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+			policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+			policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 		}
 		lastUpdatedAtNanos = int64(67890)
 		lastUpdatedBy      = "testUser"
@@ -215,8 +216,8 @@ func TestNewMappingRule(t *testing.T) {
 			tombstoned:   true,
 			cutoverNanos: 67890,
 			policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-				policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+				policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 			},
 		},
 	}
@@ -290,7 +291,7 @@ func TestNewMappingRuleFromFields(t *testing.T) {
 	mr, err := newMappingRuleFromFields(
 		"bar",
 		rawFilter,
-		[]policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID)},
+		[]policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), aggregation.DefaultID)},
 		UpdateMetadata{12345, 12345, "test_user"},
 	)
 	require.NoError(t, err)
@@ -300,7 +301,7 @@ func TestNewMappingRuleFromFields(t *testing.T) {
 		cutoverNanos: 12345,
 		filter:       nil,
 		rawFilter:    rawFilter,
-		policies:     []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID)},
+		policies:     []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), aggregation.DefaultID)},
 	}
 
 	require.NoError(t, err)
@@ -346,7 +347,7 @@ func TestMappingRuleMarkTombstoned(t *testing.T) {
 	require.NoError(t, err)
 
 	expectedPolicies := []policy.Policy{
-		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.MustCompressAggregationTypes(policy.P999)),
+		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.MustCompressTypes(aggregation.P999)),
 	}
 	require.Equal(t, 1, len(mr.snapshots))
 	lastSnapshot := mr.snapshots[0]
diff --git a/rules/options.go b/rules/options.go
index fdc46d1..4b7dcde 100644
--- a/rules/options.go
+++ b/rules/options.go
@@ -21,9 +21,9 @@
 package rules
 
 import (
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/filters"
 	"github.com/m3db/m3metrics/metric/id"
-	"github.com/m3db/m3metrics/policy"
 )
 
 // Options provides a set of options for rule matching.
@@ -47,23 +47,23 @@ type Options interface {
 	IsRollupIDFn() id.MatchIDFn
 
 	// SetAggregationTypesOptions sets the aggregation types options.
-	SetAggregationTypesOptions(v policy.AggregationTypesOptions) Options
+	SetAggregationTypesOptions(v aggregation.TypesOptions) Options
 
 	// AggregationTypesOptions returns the aggregation types options.
-	AggregationTypesOptions() policy.AggregationTypesOptions
+	AggregationTypesOptions() aggregation.TypesOptions
 }
 
 type options struct {
 	tagsFilterOpts filters.TagsFilterOptions
 	newRollupIDFn  id.NewIDFn
 	isRollupIDFn   id.MatchIDFn
-	aggTypesOpts   policy.AggregationTypesOptions
+	aggTypesOpts   aggregation.TypesOptions
 }
 
 // NewOptions creates a new set of options.
 func NewOptions() Options {
 	return &options{
-		aggTypesOpts: policy.NewAggregationTypesOptions(),
+		aggTypesOpts: aggregation.NewTypesOptions(),
 	}
 }
@@ -97,12 +97,12 @@ func (o *options) IsRollupIDFn() id.MatchIDFn {
 	return o.isRollupIDFn
 }
 
-func (o *options) SetAggregationTypesOptions(value policy.AggregationTypesOptions) Options {
+func (o *options) SetAggregationTypesOptions(value aggregation.TypesOptions) Options {
 	opts := *o
 	opts.aggTypesOpts = value
 	return &opts
 }
 
-func (o *options) AggregationTypesOptions() policy.AggregationTypesOptions {
+func (o *options) AggregationTypesOptions() aggregation.TypesOptions {
 	return o.aggTypesOpts
 }
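With the move, rule-matching options carry aggregation.TypesOptions instead of policy.AggregationTypesOptions. A minimal wiring sketch using only the constructors and accessors that appear in this diff:

	package main

	import (
		"github.com/m3db/m3metrics/aggregation"
		"github.com/m3db/m3metrics/rules"
	)

	func main() {
		// NewOptions installs aggregation.NewTypesOptions() by default;
		// SetAggregationTypesOptions returns a copy with the override applied.
		aggTypesOpts := aggregation.NewTypesOptions()
		opts := rules.NewOptions().SetAggregationTypesOptions(aggTypesOpts)
		_ = opts.AggregationTypesOptions() // the overridden value
	}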
diff --git a/rules/result_test.go b/rules/result_test.go
index 8b8c02c..fe458b7 100644
--- a/rules/result_test.go
+++ b/rules/result_test.go
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/policy"
 	xtime "github.com/m3db/m3x/time"
 
@@ -44,17 +45,17 @@ func TestMatchResult(t *testing.T) {
 				12345,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				23456,
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 		}
@@ -66,17 +67,17 @@ func TestMatchResult(t *testing.T) {
 				12345,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				23456,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -120,8 +121,8 @@ func TestMatchResult(t *testing.T) {
 				23456,
 				true,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -133,8 +134,8 @@ func TestMatchResult(t *testing.T) {
 				23456,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
diff --git a/rules/rollup_test.go b/rules/rollup_test.go
index 01e907c..587158b 100644
--- a/rules/rollup_test.go
+++ b/rules/rollup_test.go
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/errors"
 	"github.com/m3db/m3metrics/generated/proto/schema"
 	"github.com/m3db/m3metrics/policy"
@@ -122,7 +123,7 @@ func TestNewRollupTargetNilPolicySchema(t *testing.T) {
 
 func TestRollupTargetSameTransform(t *testing.T) {
 	policies := []policy.Policy{
-		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.DefaultAggregationID),
+		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), aggregation.DefaultID),
 	}
 	target := RollupTarget{Name: b("foo"), Tags: bs("bar1", "bar2")}
 	inputs := []testRollupTargetData{
@@ -162,7 +163,7 @@ func TestRollupTargetSameTransform(t *testing.T) {
 
 func TestRollupTargetClone(t *testing.T) {
 	policies := []policy.Policy{
-		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.DefaultAggregationID),
+		policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), aggregation.DefaultID),
 	}
 	target := RollupTarget{Name: b("foo"), Tags: bs("bar1", "bar2"), Policies: policies}
 	cloned := target.clone()
@@ -184,7 +185,7 @@ func TestNewRollupRuleSnapshotFromSchema(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	}
@@ -229,7 +230,7 @@ func TestNewRollupRuleSnapshotFromFields(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	}
@@ -304,7 +305,7 @@ func TestRollupRuleValidSchema(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	},
@@ -318,7 +319,7 @@ func TestRollupRuleValidSchema(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
 				policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), compressedMean),
 			},
 		},
@@ -402,7 +403,7 @@ func TestNewRollupRuleFromFields(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	},
@@ -420,7 +421,7 @@ func TestNewRollupRuleFromFields(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	},
@@ -473,7 +474,7 @@ func TestRollupRuleMarkTombstoned(t *testing.T) {
 			Name: b("rName1"),
 			Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 			Policies: []policy.Policy{
-				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+				policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 			},
 		},
 	}
diff --git a/rules/ruleset.go b/rules/ruleset.go
index e668c2f..ba83f1f 100644
--- a/rules/ruleset.go
+++ b/rules/ruleset.go
@@ -29,6 +29,7 @@ import (
 	"time"
 
 	"github.com/m3db/m3cluster/kv"
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/filters"
 	"github.com/m3db/m3metrics/generated/proto/schema"
 	"github.com/m3db/m3metrics/metric"
@@ -59,7 +60,7 @@ type Matcher interface {
 	// ReverseMatch reverse matches the applicable policies for a metric id between [fromNanos, toNanos),
 	// taking into account the metric type and aggregation type of the given id.
-	ReverseMatch(id []byte, fromNanos, toNanos int64, mt metric.Type, at policy.AggregationType) MatchResult
+	ReverseMatch(id []byte, fromNanos, toNanos int64, mt metric.Type, at aggregation.Type) MatchResult
 }
 
 type activeRuleSet struct {
@@ -70,7 +71,7 @@ type activeRuleSet struct {
 	tagFilterOpts filters.TagsFilterOptions
 	newRollupIDFn metricID.NewIDFn
 	isRollupIDFn  metricID.MatchIDFn
-	aggTypeOpts   policy.AggregationTypesOptions
+	aggTypeOpts   aggregation.TypesOptions
 }
 
 func newActiveRuleSet(
@@ -80,7 +81,7 @@ func newActiveRuleSet(
 	tagFilterOpts filters.TagsFilterOptions,
 	newRollupIDFn metricID.NewIDFn,
 	isRollupIDFn metricID.MatchIDFn,
-	aggOpts policy.AggregationTypesOptions,
+	aggOpts aggregation.TypesOptions,
 ) *activeRuleSet {
 	uniqueCutoverTimes := make(map[int64]struct{})
 	for _, mappingRule := range mappingRules {
@@ -141,7 +142,8 @@ func (as *activeRuleSet) ForwardMatch(
 func (as *activeRuleSet) ReverseMatch(
 	id []byte,
 	fromNanos, toNanos int64,
-	mt metric.Type, at policy.AggregationType,
+	mt metric.Type,
+	at aggregation.Type,
 ) MatchResult {
 	var (
 		nextIdx = as.nextCutoverIdx(fromNanos)
@@ -177,7 +179,7 @@ func (as *activeRuleSet) reverseMappingsFor(
 	isRollupID bool,
 	timeNanos int64,
 	mt metric.Type,
-	at policy.AggregationType,
+	at aggregation.Type,
 ) (policy.StagedPolicies, bool) {
 	if !isRollupID {
 		return as.reverseMappingsForNonRollupID(id, timeNanos, mt, at)
@@ -200,7 +202,8 @@ func (as *activeRuleSet) reverseMappingsFor(
 func (as *activeRuleSet) reverseMappingsForRollupID(
 	name, tags []byte,
 	timeNanos int64,
-	mt metric.Type, at policy.AggregationType,
+	mt metric.Type,
+	at aggregation.Type,
 ) (policy.StagedPolicies, bool) {
 	for _, rollupRule := range as.rollupRules {
 		snapshot := rollupRule.ActiveSnapshot(timeNanos)
@@ -255,7 +258,7 @@ func (as *activeRuleSet) reverseMappingsForNonRollupID(
 	id []byte,
 	timeNanos int64,
 	mt metric.Type,
-	at policy.AggregationType,
+	at aggregation.Type,
 ) (policy.StagedPolicies, bool) {
 	policies, cutoverNanos := as.mappingsForNonRollupID(id, timeNanos)
 	// NB(cw) aggregation types filter must be applied after the policy list is resolved.
@@ -544,7 +547,7 @@ type ruleSet struct {
 	tagsFilterOpts filters.TagsFilterOptions
 	newRollupIDFn  metricID.NewIDFn
 	isRollupIDFn   metricID.MatchIDFn
-	aggOpts        policy.AggregationTypesOptions
+	aggTypesOpts   aggregation.TypesOptions
 }
 
 // NewRuleSetFromSchema creates a new RuleSet from a schema object.
@@ -583,7 +586,7 @@ func NewRuleSetFromSchema(version int, rs *schema.RuleSet, opts Options) (RuleSe
 		tagsFilterOpts: tagsFilterOpts,
 		newRollupIDFn:  opts.NewRollupIDFn(),
 		isRollupIDFn:   opts.IsRollupIDFn(),
-		aggOpts:        opts.AggregationTypesOptions(),
+		aggTypesOpts:   opts.AggregationTypesOptions(),
 	}, nil
 }
@@ -596,7 +599,7 @@ func NewEmptyRuleSet(namespaceName string, meta UpdateMetadata) MutableRuleSet {
 		tombstoned:   false,
 		mappingRules: make([]*mappingRule, 0),
 		rollupRules:  make([]*rollupRule, 0),
-		aggOpts:      policy.NewAggregationTypesOptions(),
+		aggTypesOpts: aggregation.NewTypesOptions(),
 	}
 	rs.updateMetadata(meta)
 	return rs
@@ -627,7 +630,7 @@ func (rs *ruleSet) ActiveSet(timeNanos int64) Matcher {
 		rs.tagsFilterOpts,
 		rs.newRollupIDFn,
 		rs.isRollupIDFn,
-		rs.aggOpts,
+		rs.aggTypesOpts,
 	)
 }
@@ -744,7 +747,7 @@ func (rs *ruleSet) Clone() MutableRuleSet {
 		tagsFilterOpts: rs.tagsFilterOpts,
 		newRollupIDFn:  rs.newRollupIDFn,
 		isRollupIDFn:   rs.isRollupIDFn,
-		aggOpts:        rs.aggOpts,
+		aggTypesOpts:   rs.aggTypesOpts,
 	})
 }
@@ -1007,14 +1010,19 @@ func resolvePolicies(policies []policy.Policy) []policy.Policy {
 // is contained in the default aggregation types for the metric type.
 // * If the policy contains custom aggregation types and the given aggregation type
 // is contained in the custom aggregation type.
-func filterPoliciesWithAggregationTypes(ps []policy.Policy, mt metric.Type, at policy.AggregationType, opts policy.AggregationTypesOptions) ([]policy.Policy, bool) {
+func filterPoliciesWithAggregationTypes(
+	ps []policy.Policy,
+	mt metric.Type,
+	at aggregation.Type,
+	opts aggregation.TypesOptions,
+) ([]policy.Policy, bool) {
 	if policy.IsDefaultPolicies(ps) {
 		return nil, opts.IsContainedInDefaultAggregationTypes(at, mt)
 	}
 
 	var cur int
 	for i := 0; i < len(ps); i++ {
-		if !containsAggType(ps[i].AggregationID, mt, at, opts) {
+		if !containsAggregationType(ps[i].AggregationID, mt, at, opts) {
 			continue
 		}
 		// NB: The policy matches the given aggregation type and should be retained,
@@ -1027,7 +1035,12 @@ func filterPoliciesWithAggregationTypes(ps []policy.Policy, mt metric.Type, at p
 	return ps[:cur], false
 }
 
-func containsAggType(aggID policy.AggregationID, mt metric.Type, at policy.AggregationType, opts policy.AggregationTypesOptions) bool {
+func containsAggregationType(
+	aggID aggregation.ID,
+	mt metric.Type,
+	at aggregation.Type,
+	opts aggregation.TypesOptions,
+) bool {
 	if aggID.IsDefault() {
 		return opts.IsContainedInDefaultAggregationTypes(at, mt)
 	}
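filterPoliciesWithAggregationTypes compacts the policy slice in place: entries whose aggregation ID contains the requested type (or whose default ID covers it for the metric type) are kept in order, and the rest are dropped without allocating a new slice. The same two-index idiom in isolation (filterInPlace and the keep predicate are illustrative stand-ins for the real function and containsAggregationType):

	package main

	import "fmt"

	// filterInPlace keeps the elements satisfying keep, preserving order
	// and reusing the backing array, like the cur/i walk in the diff above.
	func filterInPlace(xs []int, keep func(int) bool) []int {
		var cur int
		for i := 0; i < len(xs); i++ {
			if !keep(xs[i]) {
				continue
			}
			xs[cur] = xs[i]
			cur++
		}
		return xs[:cur]
	}

	func main() {
		xs := []int{1, 2, 3, 4, 5}
		fmt.Println(filterInPlace(xs, func(x int) bool { return x%2 == 1 })) // [1 3 5]
	}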
diff --git a/rules/ruleset_test.go b/rules/ruleset_test.go
index d5b5ee4..959382a 100644
--- a/rules/ruleset_test.go
+++ b/rules/ruleset_test.go
@@ -27,6 +27,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/m3db/m3metrics/aggregation"
 	"github.com/m3db/m3metrics/errors"
 	"github.com/m3db/m3metrics/filters"
 	"github.com/m3db/m3metrics/generated/proto/schema"
@@ -40,12 +41,12 @@ import (
 )
 
 var (
-	compressor      = policy.NewAggregationIDCompressor()
-	compressedMax   = compressor.MustCompress(policy.AggregationTypes{policy.Max})
-	compressedCount = compressor.MustCompress(policy.AggregationTypes{policy.Count})
-	compressedMin   = compressor.MustCompress(policy.AggregationTypes{policy.Min})
-	compressedMean  = compressor.MustCompress(policy.AggregationTypes{policy.Mean})
-	compressedP999  = compressor.MustCompress(policy.AggregationTypes{policy.P999})
+	compressor      = aggregation.NewIDCompressor()
+	compressedMax   = compressor.MustCompress(aggregation.Types{aggregation.Max})
+	compressedCount = compressor.MustCompress(aggregation.Types{aggregation.Count})
+	compressedMin   = compressor.MustCompress(aggregation.Types{aggregation.Min})
+	compressedMean  = compressor.MustCompress(aggregation.Types{aggregation.Mean})
+	compressedP999  = compressor.MustCompress(aggregation.Types{aggregation.P999})
 
 	now      = time.Now().UnixNano()
 	testUser = "test_user"
@@ -63,13 +64,13 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 				22000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -84,9 +85,9 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 				35000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -101,7 +102,7 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 				24000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -139,52 +140,52 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 				false,
 				[]policy.Policy{
 					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), compressedMean),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				22000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				30000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				34000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 				},
 			),
 			policy.NewStagedPolicies(
 				35000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+					policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -200,7 +201,7 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 				24000,
 				false,
 				[]policy.Policy{
-					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+					policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 				},
 			),
 		},
@@ -215,7 +216,7 @@ func TestActiveRuleSetForwardMappingPoliciesForNonRollupID(t *testing.T) {
 		testTagsFilterOptions(),
 		mockNewID,
 		nil,
-		policy.NewAggregationTypesOptions(),
+		aggregation.NewTypesOptions(),
 	)
 	expectedCutovers := []int64{10000, 15000, 20000, 22000, 24000, 30000, 34000, 35000, 100000}
 	require.Equal(t, expectedCutovers, as.cutoverTimesAsc)
@@ -234,19 +235,19 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					22000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -257,15 +258,15 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         35001,
 			expireAtNanos:   100000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					35000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -276,13 +277,13 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					24000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -293,7 +294,7 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result:          policy.DefaultPoliciesList,
 		},
 		{
@@ -302,7 +303,7 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Min,
+			aggregationType: aggregation.Min,
 			result:          nil,
 		},
 		{
@@ -311,7 +312,7 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         12000,
 			expireAtNanos:   15000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result:          nil,
 		},
 		{
@@ -320,15 +321,15 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         21000,
 			expireAtNanos:   22000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					20000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -339,7 +340,7 @@ func TestActiveRuleSetReverseMappingPoliciesForNonRollupID(t *testing.T) {
 			matchTo:         40000,
 			expireAtNanos:   100000,
 			metricType:      metric.TimerType,
-			aggregationType: policy.Count,
+			aggregationType: aggregation.Count,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					10000,
@@ -360,52 +361,52 @@
 					20000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 					},
 				),
 				policy.NewStagedPolicies(
 					22000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 					},
 				),
 				policy.NewStagedPolicies(
 					30000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 					},
 				),
 				policy.NewStagedPolicies(
 					34000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 					},
 				),
 				policy.NewStagedPolicies(
 					35000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -416,14 +417,14 @@
 			matchTo:         40000,
 			expireAtNanos:   100000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.DefaultStagedPolicies,
 				policy.NewStagedPolicies(
 					24000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 					},
 				),
 			},
@@ -438,7 +439,7 @@
 		testTagsFilterOptions(),
 		mockNewID,
 		func([]byte, []byte) bool { return false },
-		policy.NewAggregationTypesOptions(),
+		aggregation.NewTypesOptions(),
 	)
 	expectedCutovers := []int64{10000, 15000, 20000, 22000, 24000, 30000, 34000, 35000, 100000}
 	require.Equal(t, expectedCutovers, as.cutoverTimesAsc)
@@ -457,13 +458,13 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					24000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID),
 					},
 				),
 			},
@@ -474,7 +475,7 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Min,
+			aggregationType: aggregation.Min,
 			result:          nil,
 		},
 		{
@@ -483,7 +484,7 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.UnknownType,
-			aggregationType: policy.UnknownAggregationType,
+			aggregationType: aggregation.UnknownType,
 			result:          nil,
 		},
 		{
@@ -492,13 +493,13 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.TimerType,
-			aggregationType: policy.P99,
+			aggregationType: aggregation.P99,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					24000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID),
 					},
 				),
 			},
@@ -509,7 +510,7 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         25001,
 			expireAtNanos:   30000,
 			metricType:      metric.CounterType,
-			aggregationType: policy.Sum,
+			aggregationType: aggregation.Sum,
 			result:          nil,
 		},
 		{
@@ -525,14 +526,14 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         130001,
 			expireAtNanos:   math.MaxInt64,
 			metricType:      metric.TimerType,
-			aggregationType: policy.P99,
+			aggregationType: aggregation.P99,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					120000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.MustCompressAggregationTypes(policy.Sum, policy.P90, policy.P99)),
-						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Second, 10*time.Hour), policy.MustCompressAggregationTypes(policy.Sum, policy.P99)),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.MustCompressTypes(aggregation.Sum, aggregation.P90, aggregation.P99)),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Second, 10*time.Hour), aggregation.MustCompressTypes(aggregation.Sum, aggregation.P99)),
 					},
 				),
 			},
@@ -543,13 +544,13 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) {
 			matchTo:         130001,
 			expireAtNanos:   math.MaxInt64,
 			metricType:      metric.TimerType,
-			aggregationType: policy.P90,
+			aggregationType: aggregation.P90,
 			result: policy.PoliciesList{
 				policy.NewStagedPolicies(
 					120000,
 					false,
 					[]policy.Policy{
-						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.MustCompressAggregationTypes(policy.Sum, policy.P90, policy.P99)),
+						policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.MustCompressTypes(aggregation.Sum, aggregation.P90, aggregation.P99)),
 					},
 				),
 			},
@@ -560,7 +561,7 @@ func
TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) { matchTo: 130001, expireAtNanos: math.MaxInt64, metricType: metric.GaugeType, - aggregationType: policy.Last, + aggregationType: aggregation.Last, result: nil, }, } @@ -573,7 +574,7 @@ func TestActiveRuleSetReverseMappingPoliciesForRollupID(t *testing.T) { testTagsFilterOptions(), mockNewID, func([]byte, []byte) bool { return true }, - policy.NewAggregationTypesOptions(), + aggregation.NewTypesOptions(), ) expectedCutovers := []int64{10000, 15000, 20000, 22000, 24000, 30000, 34000, 35000, 38000, 100000, 120000} require.Equal(t, expectedCutovers, as.cutoverTimesAsc) @@ -600,13 +601,13 @@ func TestActiveRuleSetRollupResults(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), }, @@ -618,7 +619,7 @@ func TestActiveRuleSetRollupResults(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), }, ), }, @@ -638,7 +639,7 @@ func TestActiveRuleSetRollupResults(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID), }, ), }, @@ -665,77 +666,77 @@ func TestActiveRuleSetRollupResults(t *testing.T) { 10000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 15000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), - 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 20000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 30000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, 
xtime.Minute, time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 34000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 35000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 38000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), }, ), }, @@ -747,14 +748,14 @@ func TestActiveRuleSetRollupResults(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( 30000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), 
policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), }, ), policy.NewStagedPolicies( @@ -776,7 +777,7 @@ func TestActiveRuleSetRollupResults(t *testing.T) { testTagsFilterOptions(), mockNewID, nil, - policy.NewAggregationTypesOptions(), + aggregation.NewTypesOptions(), ) expectedCutovers := []int64{10000, 15000, 20000, 22000, 24000, 30000, 34000, 35000, 38000, 100000, 120000} require.Equal(t, expectedCutovers, as.cutoverTimesAsc) @@ -867,12 +868,12 @@ func TestRuleSetActiveSet(t *testing.T) { false, []policy.Policy{ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), compressedMax), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID), policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), compressedMin), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), }, @@ -887,9 +888,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -931,13 +932,13 @@ func TestRuleSetActiveSet(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), - 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID), }, ), }, @@ -949,7 +950,7 @@ func TestRuleSetActiveSet(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), }, ), }, @@ -969,7 +970,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID), }, ), }, @@ -998,9 +999,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -1042,9 +1043,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -1064,7 +1065,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID), }, ), }, @@ -1093,10 +1094,10 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -1138,9 +1139,9 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -1152,7 +1153,7 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, ), }, @@ -1172,7 +1173,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID), }, ), }, @@ -1236,7 +1237,7 @@ func TestRuleSetLatest(t *testing.T) { CutoverNanos: 30000, Filter: "mtagName1:mtagValue1", Policies: []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID), }, }, "mappingRule3": &MappingRuleView{ @@ -1246,8 +1247,8 @@ func TestRuleSetLatest(t *testing.T) { CutoverNanos: 34000, Filter: "mtagName1:mtagValue1", Policies: []policy.Policy{ - policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), - policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID), }, }, "mappingRule4": &MappingRuleView{ @@ -1257,7 +1258,7 @@ func TestRuleSetLatest(t *testing.T) { CutoverNanos: 24000, Filter: "mtagName1:mtagValue2", Policies: []policy.Policy{ - 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.MustCompressAggregationTypes(policy.P999)),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.MustCompressTypes(aggregation.P999)),
 },
 },
 "mappingRule5": &MappingRuleView{
@@ -1269,7 +1270,7 @@ func TestRuleSetLatest(t *testing.T) {
 LastUpdatedBy: "test",
 Filter: "mtagName1:mtagValue1",
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1285,7 +1286,7 @@ func TestRuleSetLatest(t *testing.T) {
 Name: "rName1",
 Tags: []string{"rtagName1", "rtagName2"},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
 },
 },
@@ -1301,8 +1302,8 @@ func TestRuleSetLatest(t *testing.T) {
 Name: "rName1",
 Tags: []string{"rtagName1", "rtagName2"},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
@@ -1318,7 +1319,7 @@ func TestRuleSetLatest(t *testing.T) {
 Name: "rName3",
 Tags: []string{"rtagName1", "rtagName2"},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
@@ -1334,7 +1335,7 @@ func TestRuleSetLatest(t *testing.T) {
 Name: "rName4",
 Tags: []string{"rtagName1"},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID),
 },
 },
@@ -1350,7 +1351,7 @@ func TestRuleSetLatest(t *testing.T) {
 Name: "rName3",
 Tags: []string{"rtagName1", "rtagName2"},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
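Note: for review context, this is the shape the updated view fixtures take. A minimal sketch with illustrative names (not taken from the fixtures), assuming the time, xtime, policy and aggregation imports used throughout these test files:

    view := MappingRuleView{
        Name:   "example", // illustrative
        Filter: "mtagName1:mtagValue1",
        Policies: []policy.Policy{
            // DefaultID keeps the metric's default aggregations for this resolution/retention.
            policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
            // MustCompressTypes pins a custom aggregation set (here a single percentile) into a compressed ID.
            policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.MustCompressTypes(aggregation.P999)),
        },
    }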
@@ -1401,9 +1402,9 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 20000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 },
 },
 &mappingRuleSnapshot{
@@ -1412,7 +1413,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 30000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1436,8 +1437,8 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 22000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 &mappingRuleSnapshot{
@@ -1446,8 +1447,8 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 35000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1462,9 +1463,9 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 22000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 },
 },
 &mappingRuleSnapshot{
@@ -1473,8 +1474,8 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 34000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1489,7 +1490,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 24000,
 filter: filter2,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1504,7 +1505,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
 cutoverNanos: 100000,
 filter: filter1,
 policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1545,7 +1546,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1560,9 +1561,9 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1577,7 +1578,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1598,7 +1599,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1613,8 +1614,8 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1629,7 +1630,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1650,16 +1651,16 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), aggregation.DefaultID),
 },
 },
 {
 Name: b("rName2"),
 Tags: [][]byte{b("rtagName1")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1674,8 +1675,8 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1690,8 +1691,8 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName1"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), aggregation.DefaultID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1712,7 +1713,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName3"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
@@ -1735,7 +1736,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName4"),
 Tags: [][]byte{b("rtagName1")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Minute), aggregation.DefaultID),
 },
 },
 },
@@ -1756,7 +1757,7 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName3"),
 Tags: [][]byte{b("rtagName1"), b("rtagName2")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
 },
 },
 },
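Note: the next hunk moves a multi-type custom-aggregation fixture. For reference, a tiny sketch of composing the same compressed ID outside a literal, assuming aggregation.Types is a slice of aggregation.Type as its use elsewhere in this diff suggests:

    // Variadic expansion of a Types slice into the compressor.
    custom := aggregation.Types{aggregation.Sum, aggregation.P90, aggregation.P99}
    id := aggregation.MustCompressTypes(custom...)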
@@ -1779,8 +1780,8 @@ func testRollupRules(t *testing.T) []*rollupRule {
 Name: b("rName5"),
 Tags: [][]byte{b("rtagName1")},
 Policies: []policy.Policy{
- policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.MustCompressAggregationTypes(policy.Sum, policy.P90, policy.P99)),
- policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Second, 10*time.Hour), policy.MustCompressAggregationTypes(policy.Sum, policy.P99)),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), aggregation.MustCompressTypes(aggregation.Sum, aggregation.P90, aggregation.P99)),
+ policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Second, 10*time.Hour), aggregation.MustCompressTypes(aggregation.Sum, aggregation.P99)),
 },
 },
 },
@@ -2616,7 +2617,7 @@ func TestAddMappingRule(t *testing.T) {
 require.Equal(t, err, errRuleNotFound)
 newFilter := "tag1:value tag2:value"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 view := MappingRuleView{
 Name: "foo",
 Filter: newFilter,
@@ -2647,7 +2648,7 @@ func TestAddMappingRuleInvalidFilter(t *testing.T) {
 view := MappingRuleView{
 Name: "testInvalidFilter",
 Filter: "tag1:value1 tag2:abc[def",
- Policies: []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)},
+ Policies: []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)},
 }
 newID, err := mutable.AddMappingRule(view, helper.NewUpdateMetadata(time.Now().UnixNano(), testUser))
 require.Empty(t, newID)
@@ -2666,7 +2667,7 @@ func TestAddMappingRuleDup(t *testing.T) {
 require.NotNil(t, m)
 newFilter := "tag1:value tag2:value"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 view := MappingRuleView{
 Name: "mappingRule5.snapshot1",
 Filter: newFilter,
@@ -2695,7 +2696,7 @@ func TestAddMappingRuleRevive(t *testing.T) {
 require.NoError(t, err)
 newFilter := "test:bar"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 view := MappingRuleView{
 Name: "mappingRule5.snapshot1",
 Filter: newFilter,
@@ -2736,7 +2737,7 @@ func TestUpdateMappingRule(t *testing.T) {
 require.Contains(t, mrs, "mappingRule5")
 newFilter := "tag1:value tag2:value"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 view := MappingRuleView{
 ID: "mappingRule5",
 Name: "foo",
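Note: the mutation tests above all build the same minimal inputs. Condensed into one sketch, with names as in the tests and error handling elided:

    p := []policy.Policy{
        policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID),
    }
    view := MappingRuleView{Name: "foo", Filter: "tag1:value tag2:value", Policies: p}
    // AddMappingRule stamps the change with update metadata (author and time).
    newID, err := mutable.AddMappingRule(view, helper.NewUpdateMetadata(time.Now().UnixNano(), testUser))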
@@ -2795,7 +2796,7 @@ func TestAddRollupRule(t *testing.T) {
 require.Equal(t, err, errRuleNotFound)
 newFilter := "tag1:value tag2:value"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 newTargets := []RollupTargetView{
 RollupTargetView{
@@ -2837,7 +2838,7 @@ func TestAddRollupRuleDup(t *testing.T) {
 require.NoError(t, err)
 require.NotNil(t, r)
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 newTargets := []RollupTargetView{
 RollupTargetView{
@@ -2915,7 +2916,7 @@ func TestUpdateRollupRule(t *testing.T) {
 require.NoError(t, err)
 newFilter := "tag1:value tag2:value"
- p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID)}
+ p := []policy.Policy{policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), aggregation.DefaultID)}
 newTargets := []RollupTargetView{
 RollupTargetView{
 Name: "blah",
@@ -3037,7 +3038,7 @@ type testMappingsData struct {
 expireAtNanos int64
 result policy.PoliciesList
 metricType metric.Type
- aggregationType policy.AggregationType
+ aggregationType aggregation.Type
 }

 type testRollupResultsData struct {
diff --git a/rules/validator/config.go b/rules/validator/config.go
index 5c494a6..93b30f2 100644
--- a/rules/validator/config.go
+++ b/rules/validator/config.go
@@ -24,6 +24,7 @@ import (
 "errors"

 "github.com/m3db/m3cluster/client"
+ "github.com/m3db/m3metrics/aggregation"
 "github.com/m3db/m3metrics/filters"
 "github.com/m3db/m3metrics/metric"
 "github.com/m3db/m3metrics/policy"
@@ -141,8 +142,8 @@ type policiesOverrideConfiguration struct {
 // policiesConfiguration is the configuration for storage policies and aggregation types.
 type policiesConfiguration struct {
- StoragePolicies []policy.StoragePolicy `yaml:"storagePolicies"`
- AggregationTypes []policy.AggregationType `yaml:"aggregationTypes"`
+ StoragePolicies []policy.StoragePolicy `yaml:"storagePolicies"`
+ AggregationTypes []aggregation.Type `yaml:"aggregationTypes"`
 }

 func toRunes(s string) []rune {
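Note: with the field type switched to aggregation.Type, a parsed policies block carries the new type directly. A hypothetical literal for illustration (constructed from within the validator package, since policiesConfiguration is unexported):

    cfg := policiesConfiguration{
        StoragePolicies: []policy.StoragePolicy{
            policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour),
        },
        // Was []policy.AggregationType before this change.
        AggregationTypes: []aggregation.Type{aggregation.Sum, aggregation.Max},
    }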
diff --git a/rules/validator/options.go b/rules/validator/options.go
index 406c13f..f052248 100644
--- a/rules/validator/options.go
+++ b/rules/validator/options.go
@@ -24,6 +24,7 @@ import (
 "fmt"
 "strconv"

+ "github.com/m3db/m3metrics/aggregation"
 "github.com/m3db/m3metrics/filters"
 "github.com/m3db/m3metrics/metric"
 "github.com/m3db/m3metrics/policy"
@@ -47,14 +48,14 @@ type Options interface {
 // SetDefaultAllowedCustomAggregationTypes sets the default list of allowed custom
 // aggregation types.
- SetDefaultAllowedCustomAggregationTypes(value policy.AggregationTypes) Options
+ SetDefaultAllowedCustomAggregationTypes(value aggregation.Types) Options

 // SetAllowedStoragePoliciesFor sets the list of allowed storage policies for a given metric type.
 SetAllowedStoragePoliciesFor(t metric.Type, policies []policy.StoragePolicy) Options

 // SetAllowedCustomAggregationTypesFor sets the list of allowed custom aggregation
 // types for a given metric type.
- SetAllowedCustomAggregationTypesFor(t metric.Type, aggTypes policy.AggregationTypes) Options
+ SetAllowedCustomAggregationTypesFor(t metric.Type, aggTypes aggregation.Types) Options

 // SetMetricTypesFn sets the metric types function.
 SetMetricTypesFn(value MetricTypesFn) Options
@@ -88,18 +89,18 @@ type Options interface {
 // IsAllowedCustomAggregationTypeFor determines whether a given aggregation type is allowed for
 // the given metric type.
- IsAllowedCustomAggregationTypeFor(t metric.Type, aggType policy.AggregationType) bool
+ IsAllowedCustomAggregationTypeFor(t metric.Type, aggType aggregation.Type) bool
 }

 type validationMetadata struct {
 allowedStoragePolicies map[policy.StoragePolicy]struct{}
- allowedCustomAggTypes map[policy.AggregationType]struct{}
+ allowedCustomAggTypes map[aggregation.Type]struct{}
 }

 type options struct {
 namespaceValidator namespace.Validator
 defaultAllowedStoragePolicies map[policy.StoragePolicy]struct{}
- defaultAllowedCustomAggregationTypes map[policy.AggregationType]struct{}
+ defaultAllowedCustomAggregationTypes map[aggregation.Type]struct{}
 metricTypesFn MetricTypesFn
 requiredRollupTags []string
 metricNameInvalidChars map[rune]struct{}
@@ -129,7 +130,7 @@ func (o *options) SetDefaultAllowedStoragePolicies(value []policy.StoragePolicy)
 return o
 }

-func (o *options) SetDefaultAllowedCustomAggregationTypes(value policy.AggregationTypes) Options {
+func (o *options) SetDefaultAllowedCustomAggregationTypes(value aggregation.Types) Options {
 o.defaultAllowedCustomAggregationTypes = toAggregationTypeSet(value)
 return o
 }
@@ -141,7 +142,7 @@ func (o *options) SetAllowedStoragePoliciesFor(t metric.Type, policies []policy.
 return o
 }

-func (o *options) SetAllowedCustomAggregationTypesFor(t metric.Type, aggTypes policy.AggregationTypes) Options {
+func (o *options) SetAllowedCustomAggregationTypesFor(t metric.Type, aggTypes aggregation.Types) Options {
 metadata := o.findOrCreateMetadata(t)
 metadata.allowedCustomAggTypes = toAggregationTypeSet(aggTypes)
 o.metadatasByType[t] = metadata
@@ -203,7 +204,7 @@ func (o *options) IsAllowedStoragePolicyFor(t metric.Type, p policy.StoragePolic
 return found
 }

-func (o *options) IsAllowedCustomAggregationTypeFor(t metric.Type, aggType policy.AggregationType) bool {
+func (o *options) IsAllowedCustomAggregationTypeFor(t metric.Type, aggType aggregation.Type) bool {
 if metadata, exists := o.metadatasByType[t]; exists {
 _, found := metadata.allowedCustomAggTypes[aggType]
 return found
@@ -230,8 +231,8 @@ func toStoragePolicySet(policies []policy.StoragePolicy) map[policy.StoragePolic
 return m
 }

-func toAggregationTypeSet(aggTypes policy.AggregationTypes) map[policy.AggregationType]struct{} {
- m := make(map[policy.AggregationType]struct{}, len(aggTypes))
+func toAggregationTypeSet(aggTypes aggregation.Types) map[aggregation.Type]struct{} {
+ m := make(map[aggregation.Type]struct{}, len(aggTypes))
 for _, t := range aggTypes {
 m[t] = struct{}{}
 }
diff --git a/rules/validator/validator.go b/rules/validator/validator.go
index 657010b..2272f7c 100644
--- a/rules/validator/validator.go
+++ b/rules/validator/validator.go
@@ -266,7 +266,7 @@ func (v *validator) validatePolicy(t metric.Type, p policy.Policy) error {
 if isDefaultAggFn := p.AggregationID.IsDefault(); isDefaultAggFn {
 return nil
 }
- aggTypes, err := p.AggregationID.AggregationTypes()
+ aggTypes, err := p.AggregationID.Types()
 if err != nil {
 return err
 }
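Note: the options whitelist and the validator's decompression now speak aggregation.Type end to end. A sketch of the check validatePolicy performs, inside a validation function; the NewOptions constructor is an assumption, everything else follows the signatures in this diff:

    opts := NewOptions(). // assumed constructor for the validator options
        SetAllowedCustomAggregationTypesFor(metric.TimerType, aggregation.Types{aggregation.Count, aggregation.Max})

    aggTypes, err := p.AggregationID.Types() // decompress the ID back into Types
    if err != nil {
        return err
    }
    for _, aggType := range aggTypes {
        if !opts.IsAllowedCustomAggregationTypeFor(metric.TimerType, aggType) {
            return fmt.Errorf("aggregation type %v not allowed for timers", aggType)
        }
    }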
diff --git a/rules/validator/validator_test.go b/rules/validator/validator_test.go
index 5cce3b1..5d7503b 100644
--- a/rules/validator/validator_test.go
+++ b/rules/validator/validator_test.go
@@ -27,6 +27,7 @@ import (
 "github.com/m3db/m3cluster/generated/proto/commonpb"
 "github.com/m3db/m3cluster/kv/mem"
+ "github.com/m3db/m3metrics/aggregation"
 "github.com/m3db/m3metrics/errors"
 "github.com/m3db/m3metrics/filters"
 "github.com/m3db/m3metrics/generated/proto/schema"
@@ -171,7 +172,7 @@ func TestValidatorValidateMappingRuleDuplicatePolicies(t *testing.T) {
 }

 func TestValidatorValidateMappingRuleCustomAggregationTypes(t *testing.T) {
- testAggregationTypes := []policy.AggregationType{policy.Count, policy.Max}
+ testAggregationTypes := []aggregation.Type{aggregation.Count, aggregation.Max}
 ruleSet := testRuleSetWithMappingRules(t, testCustomAggregationTypeMappingRulesConfig())
 inputs := []struct {
 opts Options
@@ -344,7 +345,7 @@ func TestValidatorValidateRollupRuleWithDuplicatePolicies(t *testing.T) {
 }

 func TestValidatorValidateRollupRuleCustomAggregationTypes(t *testing.T) {
- testAggregationTypes := []policy.AggregationType{policy.Count, policy.Max}
+ testAggregationTypes := []aggregation.Type{aggregation.Count, aggregation.Max}
 ruleSet := testRuleSetWithRollupRules(t, testCustomAggregationTypeRollupRulesConfig())
 inputs := []struct {
 opts Options
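Note: taken as a whole, the renames in this diff follow a single mapping from the policy package to the new aggregation package; summarizing for review:

    policy.AggregationType -> aggregation.Type
    policy.AggregationTypes -> aggregation.Types
    policy.DefaultAggregationID -> aggregation.DefaultID
    policy.UnknownAggregationType -> aggregation.UnknownType
    policy.MustCompressAggregationTypes -> aggregation.MustCompressTypes
    policy.NewAggregationTypesOptions -> aggregation.NewTypesOptions
    policy.Sum, policy.Min, policy.Max, policy.Count, policy.Last, policy.P90, policy.P99, policy.P999 -> the same names under aggregation
    AggregationID.AggregationTypes() -> AggregationID.Types()

Storage-policy constructors (policy.NewPolicy, policy.NewStoragePolicy, policy.NewStagedPolicies, policy.PoliciesList) stay in the policy package.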