From 27264bf7f768aebd5740b7b7ade14bcfef7da553 Mon Sep 17 00:00:00 2001 From: Chao Wang Date: Thu, 25 May 2017 14:49:25 -0400 Subject: [PATCH] rename --- .excludelint | 1 + generated/proto/policy.proto | 33 +- generated/proto/schema/namespace.pb.go | 1 + generated/proto/schema/policy.pb.go | 131 +++- glide.lock | 8 +- glide.yaml | 2 + metric/aggregated/types.go | 8 +- policy/aggregation_id_compress.go | 105 +++ policy/aggregation_id_compress_test.go | 69 ++ policy/aggregation_type.go | 274 +++++++ policy/aggregation_type_test.go | 100 +++ policy/aggregationtype_string.go | 16 + policy/policy.go | 217 ++---- policy/policy_benchmark_test.go | 4 +- policy/policy_test.go | 322 ++------- policy/staged_policy.go | 129 ++++ policy/staged_policy_test.go | 236 ++++++ policy/storage_policy.go | 123 ++++ policy/storage_policy_test.go | 205 ++++++ protocol/msgpack/aggregated_encoder.go | 12 +- protocol/msgpack/aggregated_encoder_test.go | 4 +- protocol/msgpack/aggregated_iterator.go | 6 +- protocol/msgpack/aggregated_iterator_test.go | 4 +- protocol/msgpack/aggregated_roundtrip_test.go | 16 +- protocol/msgpack/base_encoder.go | 86 ++- protocol/msgpack/base_iterator.go | 50 +- protocol/msgpack/base_test.go | 45 ++ protocol/msgpack/raw_metric_test.go | 15 +- protocol/msgpack/schema.go | 18 +- protocol/msgpack/types.go | 8 +- protocol/msgpack/unaggregated_encoder_test.go | 33 +- .../msgpack/unaggregated_iterator_test.go | 10 +- .../msgpack/unaggregated_roundtrip_test.go | 54 +- rules/mapping_test.go | 49 +- rules/result_test.go | 28 +- rules/rollup_test.go | 55 +- rules/ruleset.go | 20 +- rules/ruleset_test.go | 672 +++++++++++------- 38 files changed, 2313 insertions(+), 856 deletions(-) create mode 100644 policy/aggregation_id_compress.go create mode 100644 policy/aggregation_id_compress_test.go create mode 100644 policy/aggregation_type.go create mode 100644 policy/aggregation_type_test.go create mode 100644 policy/aggregationtype_string.go create mode 100644 
policy/staged_policy.go create mode 100644 policy/staged_policy_test.go create mode 100644 policy/storage_policy.go create mode 100644 policy/storage_policy_test.go create mode 100644 protocol/msgpack/base_test.go diff --git a/.excludelint b/.excludelint index 5093d7e..e8e59d7 100644 --- a/.excludelint +++ b/.excludelint @@ -1,3 +1,4 @@ (vendor/) (generated/) (_mock.go) +(_string.go) diff --git a/generated/proto/policy.proto b/generated/proto/policy.proto index 437febd..7fc0b60 100644 --- a/generated/proto/policy.proto +++ b/generated/proto/policy.proto @@ -22,15 +22,38 @@ syntax = "proto3"; package schema; message Resolution { - int64 window_size = 1; - int64 precision = 2; + int64 window_size = 1; + int64 precision = 2; } message Retention { - int64 period = 1; + int64 period = 1; +} + +message StoragePolicy { + Resolution resolution = 1; + Retention retention = 2; } message Policy { - Resolution resolution = 1; - Retention retention = 2; + StoragePolicy storage_policy = 1; + repeated AggregationType aggregation_types = 2; +} + +enum AggregationType { + UNKNOWN = 0; + LAST = 1; + LOWER = 2; + UPPER = 3; + MEAN = 4; + MEDIAN = 5; + COUNT = 6; + SUM = 7; + SUMSQ = 8; + STDEV = 9; + P50 = 10; + P95 = 11; + P99 = 12; + P999 = 13; + P9999 = 14; } diff --git a/generated/proto/schema/namespace.pb.go b/generated/proto/schema/namespace.pb.go index 96cdfad..f0eadc0 100644 --- a/generated/proto/schema/namespace.pb.go +++ b/generated/proto/schema/namespace.pb.go @@ -36,6 +36,7 @@ It has these top-level messages: Namespaces Resolution Retention + StoragePolicy Policy MappingRuleSnapshot MappingRule diff --git a/generated/proto/schema/policy.pb.go b/generated/proto/schema/policy.pb.go index 60c6a77..8a42576 100644 --- a/generated/proto/schema/policy.pb.go +++ b/generated/proto/schema/policy.pb.go @@ -33,6 +33,66 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +type AggregationType int32 + +const ( + AggregationType_UNKNOWN AggregationType = 0 + 
AggregationType_LAST AggregationType = 1 + AggregationType_LOWER AggregationType = 2 + AggregationType_UPPER AggregationType = 3 + AggregationType_MEAN AggregationType = 4 + AggregationType_MEDIAN AggregationType = 5 + AggregationType_COUNT AggregationType = 6 + AggregationType_SUM AggregationType = 7 + AggregationType_SUMSQ AggregationType = 8 + AggregationType_STDEV AggregationType = 9 + AggregationType_P50 AggregationType = 10 + AggregationType_P95 AggregationType = 11 + AggregationType_P99 AggregationType = 12 + AggregationType_P999 AggregationType = 13 + AggregationType_P9999 AggregationType = 14 +) + +var AggregationType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "LAST", + 2: "LOWER", + 3: "UPPER", + 4: "MEAN", + 5: "MEDIAN", + 6: "COUNT", + 7: "SUM", + 8: "SUMSQ", + 9: "STDEV", + 10: "P50", + 11: "P95", + 12: "P99", + 13: "P999", + 14: "P9999", +} +var AggregationType_value = map[string]int32{ + "UNKNOWN": 0, + "LAST": 1, + "LOWER": 2, + "UPPER": 3, + "MEAN": 4, + "MEDIAN": 5, + "COUNT": 6, + "SUM": 7, + "SUMSQ": 8, + "STDEV": 9, + "P50": 10, + "P95": 11, + "P99": 12, + "P999": 13, + "P9999": 14, +} + +func (x AggregationType) String() string { + return proto.EnumName(AggregationType_name, int32(x)) +} +func (AggregationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + type Resolution struct { WindowSize int64 `protobuf:"varint,1,opt,name=window_size,json=windowSize" json:"window_size,omitempty"` Precision int64 `protobuf:"varint,2,opt,name=precision" json:"precision,omitempty"` @@ -52,50 +112,81 @@ func (m *Retention) String() string { return proto.CompactTextString( func (*Retention) ProtoMessage() {} func (*Retention) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } -type Policy struct { +type StoragePolicy struct { Resolution *Resolution `protobuf:"bytes,1,opt,name=resolution" json:"resolution,omitempty"` Retention *Retention `protobuf:"bytes,2,opt,name=retention" json:"retention,omitempty"` } -func (m 
*Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } +func (m *StoragePolicy) Reset() { *m = StoragePolicy{} } +func (m *StoragePolicy) String() string { return proto.CompactTextString(m) } +func (*StoragePolicy) ProtoMessage() {} +func (*StoragePolicy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } -func (m *Policy) GetResolution() *Resolution { +func (m *StoragePolicy) GetResolution() *Resolution { if m != nil { return m.Resolution } return nil } -func (m *Policy) GetRetention() *Retention { +func (m *StoragePolicy) GetRetention() *Retention { if m != nil { return m.Retention } return nil } +type Policy struct { + StoragePolicy *StoragePolicy `protobuf:"bytes,1,opt,name=storage_policy,json=storagePolicy" json:"storage_policy,omitempty"` + AggregationTypes []AggregationType `protobuf:"varint,2,rep,packed,name=aggregation_types,json=aggregationTypes,enum=schema.AggregationType" json:"aggregation_types,omitempty"` +} + +func (m *Policy) Reset() { *m = Policy{} } +func (m *Policy) String() string { return proto.CompactTextString(m) } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Policy) GetStoragePolicy() *StoragePolicy { + if m != nil { + return m.StoragePolicy + } + return nil +} + func init() { proto.RegisterType((*Resolution)(nil), "schema.Resolution") proto.RegisterType((*Retention)(nil), "schema.Retention") + proto.RegisterType((*StoragePolicy)(nil), "schema.StoragePolicy") proto.RegisterType((*Policy)(nil), "schema.Policy") + proto.RegisterEnum("schema.AggregationType", AggregationType_name, AggregationType_value) } func init() { proto.RegisterFile("policy.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 183 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0xc1, 0xca, 0xc2, 0x30, - 0x10, 0x84, 0xe9, 0xff, 0x43, 0xa0, 0x5b, 0x2f, 0xe6, 0x20, 0x3d, 0x08, 0x4a, 0xbd, 0x78, 0xaa, - 0x50, 0x1f, 0xc3, 0x8b, 0xc4, 0x07, 0x10, 0x6d, 0x17, 0x5c, 0x68, 0xb3, 0x21, 0xa9, 0x14, 0xfb, - 0xf4, 0xc2, 0xb6, 0x1a, 0x8f, 0x99, 0x99, 0xcc, 0x7c, 0x0b, 0x0b, 0xc7, 0x2d, 0xd5, 0xaf, 0xd2, - 0x79, 0xee, 0x59, 0xab, 0x50, 0x3f, 0xb0, 0xbb, 0x15, 0x27, 0x00, 0x83, 0x81, 0xdb, 0x67, 0x4f, - 0x6c, 0xf5, 0x06, 0xb2, 0x81, 0x6c, 0xc3, 0xc3, 0x35, 0xd0, 0x88, 0x79, 0xb2, 0x4d, 0xf6, 0xff, - 0x06, 0x26, 0xe9, 0x42, 0x23, 0xea, 0x35, 0xa4, 0xce, 0x63, 0x4d, 0x81, 0xd8, 0xe6, 0x7f, 0x62, - 0x47, 0xa1, 0xd8, 0x41, 0x6a, 0xb0, 0x47, 0x2b, 0x5d, 0x2b, 0x50, 0x0e, 0x3d, 0x71, 0x33, 0xd7, - 0xcc, 0xaf, 0xa2, 0x03, 0x75, 0x16, 0x12, 0x5d, 0x01, 0xf8, 0xef, 0xb6, 0xa4, 0xb2, 0x4a, 0x97, - 0x13, 0x58, 0x19, 0xa9, 0xcc, 0x4f, 0x4a, 0x1f, 0x20, 0xf5, 0x9f, 0x09, 0x01, 0xc8, 0xaa, 0x65, - 0xfc, 0x32, 0x1b, 0x26, 0x66, 0xee, 0x4a, 0xee, 0x3d, 0xbe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x39, - 0x9b, 0xa0, 0x22, 0xff, 0x00, 0x00, 0x00, + // 379 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x8f, 0x93, 0x40, + 0x1c, 0xc5, 0xa5, 0xec, 0xd2, 0xe5, 0xcf, 0xb6, 0xfe, 0x77, 0x12, 0xb5, 0x07, 0x13, 0x37, 0x78, + 0xd9, 0x78, 0xa8, 0x06, 0xb3, 0x07, 0x12, 0x2f, 0xc4, 0x72, 0x30, 0xbb, 0x50, 0x1c, 0xc0, 0x1e, + 0x1b, 0x6c, 0x27, 0x38, 0x49, 0x65, 0x08, 0x83, 0x69, 0xda, 0xcf, 0xe0, 0x67, 0xf1, 0x33, 0x9a, + 0x19, 0xa8, 0xd8, 0xbd, 0x3d, 0xde, 0x7b, 0xbc, 0xff, 0x2f, 0x04, 0xb8, 0xae, 0xc5, 0x8e, 0x6f, + 0x0e, 0xf3, 0xba, 0x11, 0xad, 0x20, 0x96, 0xdc, 0xfc, 0x60, 0x3f, 0x0b, 0xf7, 0x01, 0x80, 0x32, + 0x29, 0x76, 0xbf, 0x5a, 0x2e, 0x2a, 0xf2, 0x06, 0x9c, 0x3d, 0xaf, 0xb6, 0x62, 0xbf, 0x96, 0xfc, + 0xc8, 0x66, 0xc6, 0xad, 0x71, 0x67, 0x52, 0xe8, 0xac, 0x94, 0x1f, 0x19, 0x79, 0x0d, 0x76, 0xdd, + 0xb0, 0x0d, 0x97, 0x5c, 0x54, 0xb3, 0x91, 0x8e, 0x07, 0xc3, 0x7d, 0x0b, 
0x36, 0x65, 0x2d, 0xab, + 0xf4, 0xd6, 0x4b, 0xb0, 0x6a, 0xd6, 0x70, 0xb1, 0xed, 0x67, 0xfa, 0x27, 0xb7, 0x85, 0x49, 0xda, + 0x8a, 0xa6, 0x28, 0x59, 0xa2, 0x81, 0x88, 0x07, 0xd0, 0xfc, 0x43, 0xd0, 0x65, 0xc7, 0x23, 0xf3, + 0x8e, 0x6f, 0x3e, 0xc0, 0xd1, 0xff, 0x5a, 0xe4, 0x3d, 0xd8, 0xcd, 0xe9, 0x92, 0xe6, 0x70, 0xbc, + 0x9b, 0xe1, 0x95, 0x3e, 0xa0, 0x43, 0xc7, 0xfd, 0x6d, 0x80, 0xd5, 0xdf, 0xfb, 0x04, 0x53, 0xd9, + 0x01, 0xac, 0xbb, 0x4f, 0xd2, 0xdf, 0x7c, 0x71, 0x1a, 0x38, 0xc3, 0xa3, 0x13, 0x79, 0x46, 0xbb, + 0x80, 0x9b, 0xa2, 0x2c, 0x1b, 0x56, 0x16, 0x6a, 0x77, 0xdd, 0x1e, 0x6a, 0x26, 0x67, 0xa3, 0x5b, + 0xf3, 0x6e, 0xea, 0xbd, 0x3a, 0x0d, 0x04, 0x43, 0x21, 0x3b, 0xd4, 0x8c, 0x62, 0x71, 0x6e, 0xc8, + 0x77, 0x7f, 0x0c, 0x78, 0xfe, 0xa4, 0x45, 0x1c, 0x18, 0xe7, 0xf1, 0x43, 0xbc, 0x5c, 0xc5, 0xf8, + 0x8c, 0x5c, 0xc1, 0xc5, 0x63, 0x90, 0x66, 0x68, 0x10, 0x1b, 0x2e, 0x1f, 0x97, 0xab, 0x90, 0xe2, + 0x48, 0xc9, 0x3c, 0x49, 0x42, 0x8a, 0xa6, 0xca, 0xa3, 0x30, 0x88, 0xf1, 0x82, 0x00, 0x58, 0x51, + 0xb8, 0xf8, 0x12, 0xc4, 0x78, 0xa9, 0x0a, 0x9f, 0x97, 0x79, 0x9c, 0xa1, 0x45, 0xc6, 0x60, 0xa6, + 0x79, 0x84, 0x63, 0xe5, 0xa5, 0x79, 0x94, 0x7e, 0xc5, 0x2b, 0x2d, 0xb3, 0x45, 0xf8, 0x0d, 0x6d, + 0x15, 0x27, 0xf7, 0x1f, 0x10, 0xb4, 0xf0, 0xef, 0xd1, 0xe9, 0x84, 0x8f, 0xd7, 0x6a, 0x3a, 0xf1, + 0x7d, 0x1f, 0x27, 0xaa, 0xaf, 0x94, 0x8f, 0xd3, 0xef, 0x96, 0xfe, 0x6d, 0x3e, 0xfe, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x39, 0x87, 0xa5, 0x4b, 0x46, 0x02, 0x00, 0x00, } diff --git a/glide.lock b/glide.lock index 7c6afb7..e30a45c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 07a1a24c24370b50b8cb4f21bdd6fd9b1b787f00c87b1f3f95715f93d84594f2 -updated: 2017-05-09T23:04:21.133965573-04:00 +hash: 5562b649a1a9f318893ccf7bf26795bdc215d6170cd947d057d6faa87702ec05 +updated: 2017-05-23T19:50:01.633707628-04:00 imports: - name: github.com/apache/thrift version: 9549b25c77587b29be4e0b5c258221a4ed85d37a @@ -50,6 +50,8 @@ imports: - m3/customtransports - m3/thrift - m3/thriftudp +- name: 
github.com/willf/bitset + version: 1ea0245d2bc8ce44623f24a1ae162beb06ad8cd6 - name: golang.org/x/net version: f2499483f923065a842d38eb4c7f1927e6fc6e6d subpackages: @@ -72,6 +74,8 @@ imports: - name: gopkg.in/yaml.v2 version: a83829b6f1293c91addabc89d0571c246397bbf4 testImports: +- name: github.com/cw9/bitset + version: d79da395ef228e37d81befaf8c71827ab0cb2275 - name: github.com/davecgh/go-spew version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d subpackages: diff --git a/glide.yaml b/glide.yaml index 91c346e..83fbbf8 100644 --- a/glide.yaml +++ b/glide.yaml @@ -58,3 +58,5 @@ testImport: version: d77da356e56a7428ad25149ca77381849a6a5232 subpackages: - require +- package: github.com/cw9/bitset + version: d79da395ef228e37d81befaf8c71827ab0cb2275 diff --git a/metric/aggregated/types.go b/metric/aggregated/types.go index a15b1f4..7c166b5 100644 --- a/metric/aggregated/types.go +++ b/metric/aggregated/types.go @@ -77,22 +77,22 @@ type RawMetric interface { // MetricWithPolicy is a metric with applicable policy. type MetricWithPolicy struct { Metric - policy.Policy + policy.StoragePolicy } // String is the string representation of a metric with policy. func (mp MetricWithPolicy) String() string { - return fmt.Sprintf("{metric:%s,policy:%s}", mp.Metric.String(), mp.Policy.String()) + return fmt.Sprintf("{metric:%s,policy:%s}", mp.Metric.String(), mp.StoragePolicy.String()) } // ChunkedMetricWithPolicy is a chunked metric with applicable policy. type ChunkedMetricWithPolicy struct { ChunkedMetric - policy.Policy + policy.StoragePolicy } // RawMetricWithPolicy is a raw metric with applicable policy. type RawMetricWithPolicy struct { RawMetric - policy.Policy + policy.StoragePolicy } diff --git a/policy/aggregation_id_compress.go b/policy/aggregation_id_compress.go new file mode 100644 index 0000000..6f2402e --- /dev/null +++ b/policy/aggregation_id_compress.go @@ -0,0 +1,105 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "fmt" + + "github.com/cw9/bitset" +) + +// AggregationIDCompressor can compress AggregationTypes into an AggregationID. +type AggregationIDCompressor interface { + Compress(aggTypes AggregationTypes) (AggregationID, error) +} + +// AggregationIDDecompressor can decompress AggregationID. +type AggregationIDDecompressor interface { + // Decompress decompresses aggregation types, + // returns error if any invalid aggregation type is encountered. + Decompress(compressed AggregationID) (AggregationTypes, error) +} + +type aggregationIDCompressor struct { + bs *bitset.BitSet +} + +// NewAggregationTypeCompressor returns a new AggregationTypeCompressor. +func NewAggregationTypeCompressor() AggregationIDCompressor { + // NB(cw): If we start to support more than 64 types, the library will + // expand the underlying word list itself. 
+ return &aggregationIDCompressor{ + bs: bitset.New(totalAggregationTypes), + } +} + +func (c *aggregationIDCompressor) Compress(aggTypes AggregationTypes) (AggregationID, error) { + c.bs.ClearAll() + for _, aggType := range aggTypes { + if !aggType.IsValid() { + return DefaultAggregationID, fmt.Errorf("could not compress invalid AggregationType %v", aggType) + } + c.bs.Set(uint(aggType)) + } + codes := c.bs.Bytes() + var id AggregationID + for i := 0; i < AggregationIDLen; i++ { + id[i] = codes[i] + } + return id, nil +} + +type aggregationIDDecompressor struct { + bs *bitset.BitSet + buf []uint64 + res []AggregationType +} + +// NewAggregationTypeDecompressor returns a new AggregationTypeDecompressor. +func NewAggregationTypeDecompressor() AggregationIDDecompressor { + bs := bitset.New(totalAggregationTypes) + return &aggregationIDDecompressor{ + bs: bs, + buf: bs.Bytes(), + res: make([]AggregationType, 0, totalAggregationTypes), + } +} + +func (c *aggregationIDDecompressor) Decompress(id AggregationID) (AggregationTypes, error) { + for i := range id { + c.buf[i] = id[i] + } + + c.bs.Reset(c.buf) + + c.res = c.res[:0] + for i, e := c.bs.NextSet(0); e; i, e = c.bs.NextSet(i + 1) { + aggType := AggregationType(i) + if !aggType.IsValid() { + return DefaultAggregationTypes, fmt.Errorf("invalid AggregationType: %s", aggType.String()) + } + c.res = append(c.res, aggType) + } + + res := make(AggregationTypes, len(c.res)) + copy(res, c.res) + return res, nil +} diff --git a/policy/aggregation_id_compress_test.go b/policy/aggregation_id_compress_test.go new file mode 100644 index 0000000..4318968 --- /dev/null +++ b/policy/aggregation_id_compress_test.go @@ -0,0 +1,69 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAggregationIDCompressRoundTrip(t *testing.T) { + testcases := []struct { + input AggregationTypes + result AggregationTypes + expectErr bool + }{ + {DefaultAggregationTypes, AggregationTypes{}, false}, + {[]AggregationType{Unknown}, DefaultAggregationTypes, true}, + {[]AggregationType{Lower, Upper}, []AggregationType{Lower, Upper}, false}, + {[]AggregationType{Last}, []AggregationType{Last}, false}, + {[]AggregationType{P999, P9999}, []AggregationType{P999, P9999}, false}, + {[]AggregationType{1, 5, 9, 3, 2}, []AggregationType{1, 2, 3, 5, 9}, false}, + // 20 is an Unknown aggregation type. 
+ {[]AggregationType{10, 20}, DefaultAggregationTypes, true}, + } + + compressor, decompressor := NewAggregationTypeCompressor(), NewAggregationTypeDecompressor() + for _, test := range testcases { + codes, err := compressor.Compress(test.input) + if test.expectErr { + require.Error(t, err) + continue + } + res, err := decompressor.Decompress(codes) + require.NoError(t, err) + require.Equal(t, test.result, res) + } +} + +func TestAggregationIDDecompressError(t *testing.T) { + compressor, decompressor := NewAggregationTypeCompressor(), NewAggregationTypeDecompressor() + _, err := decompressor.Decompress([AggregationIDLen]uint64{1}) // aggregation type: Unknown. + require.Error(t, err) + + max, err := compressor.Compress([]AggregationType{Last, Lower, Upper, Mean, Median, Count, Sum, SumSq, Stdev, P95, P99, P999, P9999}) + require.NoError(t, err) + + max[0] = max[0] << 1 + _, err = decompressor.Decompress(max) + require.Error(t, err) +} diff --git a/policy/aggregation_type.go b/policy/aggregation_type.go new file mode 100644 index 0000000..f19593b --- /dev/null +++ b/policy/aggregation_type.go @@ -0,0 +1,274 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "fmt" + "strings" + + "github.com/m3db/m3metrics/generated/proto/schema" +) + +// Supported aggregation types. +const ( + Unknown AggregationType = iota + Last + Lower + Upper + Mean + Median + Count + Sum + SumSq + Stdev + P50 + P95 + P99 + P999 + P9999 + totalAggregationTypes = iota +) + +const ( + // AggregationIDLen is the length of the AggregationID. + // The AggregationIDLen will be 1 when totalAggregationTypes <= 64. + AggregationIDLen = (totalAggregationTypes-1)/64 + 1 + + aggregationTypesSeparator = "," +) + +var ( + // DefaultAggregationTypes is a default list of aggregation types. + DefaultAggregationTypes AggregationTypes + + // DefaultAggregationID is a default AggregationID. + DefaultAggregationID AggregationID + + // ValidAggregationTypes is the list of all the valid aggregation types + ValidAggregationTypes = []AggregationType{ + Last, + Lower, + Upper, + Mean, + Median, + Count, + Sum, + SumSq, + Stdev, + P50, + P95, + P99, + P999, + P9999, + } + + aggregationTypeStringMap map[string]AggregationType +) + +func init() { + aggregationTypeStringMap = make(map[string]AggregationType, totalAggregationTypes) + for _, aggType := range ValidAggregationTypes { + aggregationTypeStringMap[aggType.String()] = aggType + } +} + +// AggregationType defines a custom aggregation function. +type AggregationType int + +// NewAggregationTypeFromSchema creates an aggregation type from a schema. 
+func NewAggregationTypeFromSchema(input schema.AggregationType) (AggregationType, error) { + aggType := AggregationType(input) + if !aggType.IsValid() { + return Unknown, fmt.Errorf("invalid aggregation type from schema: %s", input) + } + return aggType, nil +} + +// IsValid checks if an AggregationType is valid. +func (a AggregationType) IsValid() bool { + return a > 0 && a < totalAggregationTypes +} + +// IsValidForGauge if an AggregationType is valid for Gauge. +func (a AggregationType) IsValidForGauge() bool { + switch a { + case Last, Lower, Upper, Mean, Count, Sum, SumSq, Stdev: + return true + default: + return false + } +} + +// IsValidForCounter if an AggregationType is valid for Counter. +func (a AggregationType) IsValidForCounter() bool { + switch a { + case Lower, Upper, Mean, Count, Sum, SumSq, Stdev: + return true + default: + return false + } +} + +// IsValidForTimer if an AggregationType is valid for Timer. +func (a AggregationType) IsValidForTimer() bool { + switch a { + case Last: + return false + default: + return true + } +} + +// ParseAggregationType parses an aggregation type. +func ParseAggregationType(str string) (AggregationType, bool) { + aggType, ok := aggregationTypeStringMap[str] + return aggType, ok +} + +// AggregationTypes is a list of AggregationTypes. +type AggregationTypes []AggregationType + +// NewAggregationTypesFromSchema creates a list of aggregation types from a schema. +func NewAggregationTypesFromSchema(input []schema.AggregationType) (AggregationTypes, error) { + res := make([]AggregationType, len(input)) + for i, t := range input { + aggType, err := NewAggregationTypeFromSchema(t) + if err != nil { + return DefaultAggregationTypes, err + } + res[i] = aggType + } + return res, nil +} + +// IsDefault checks if the AggregationTypes is the default aggregation type. +func (aggTypes AggregationTypes) IsDefault() bool { + return len(aggTypes) == 0 +} + +// String is for debugging. 
+func (aggTypes AggregationTypes) String() string { + if len(aggTypes) == 0 { + return "" + } + + parts := make([]string, len(aggTypes)) + for i, aggType := range aggTypes { + parts[i] = aggType.String() + } + return strings.Join(parts, aggregationTypesSeparator) +} + +// IsValidForGauge checks if the list of aggregation types is valid for Gauge. +func (aggTypes AggregationTypes) IsValidForGauge() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForGauge() { + return false + } + } + return true +} + +// IsValidForCounter checks if the list of aggregation types is valid for Counter. +func (aggTypes AggregationTypes) IsValidForCounter() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForCounter() { + return false + } + } + return true +} + +// IsValidForTimer checks if the list of aggregation types is valid for Timer. +func (aggTypes AggregationTypes) IsValidForTimer() bool { + for _, aggType := range aggTypes { + if !aggType.IsValidForTimer() { + return false + } + } + return true +} + +// ParseAggregationTypes parses a list of aggregation types in the form of type1,type2,type3. +func ParseAggregationTypes(str string) (AggregationTypes, error) { + parts := strings.Split(str, aggregationTypesSeparator) + res := make(AggregationTypes, len(parts)) + for i := range parts { + aggType, ok := ParseAggregationType(parts[i]) + if !ok { + return nil, fmt.Errorf("invalid aggregation type: %s", parts[i]) + } + res[i] = aggType + } + return res, nil +} + +// AggregationID represents a compressed view of AggregationTypes. +type AggregationID [AggregationIDLen]uint64 + +// NewAggregationIDFromSchema creates an AggregationID from schema. +func NewAggregationIDFromSchema(input []schema.AggregationType) (AggregationID, error) { + aggTypes, err := NewAggregationTypesFromSchema(input) + if err != nil { + return DefaultAggregationID, err + } + + // TODO(cw): consider pooling these compressors, + // this allocates one extra slice of length one per call. 
+ id, err := NewAggregationTypeCompressor().Compress(aggTypes) + if err != nil { + return DefaultAggregationID, err + } + return id, nil +} + +// IsDefault checks if the AggregationID is the default aggregation type. +func (id AggregationID) IsDefault() bool { + return id == DefaultAggregationID +} + +// Merge returns the result of merging another AggregationID, with an indicater whether +// any new aggregation type was found in the other AggregationID. +func (id AggregationID) Merge(other AggregationID) (AggregationID, bool) { + var merged bool + for i, code := range id { + otherCode := other[i] + if otherCode == 0 { + continue + } + mergeResult := code | otherCode + if code != mergeResult { + merged = true + id[i] = mergeResult + } + } + return id, merged +} + +// String for debugging. +func (id AggregationID) String() string { + aggTypes, err := NewAggregationTypeDecompressor().Decompress(id) + if err != nil { + return fmt.Sprintf("[invalid AggregationID: %v]", err) + } + + return aggTypes.String() +} diff --git a/policy/aggregation_type_test.go b/policy/aggregation_type_test.go new file mode 100644 index 0000000..d851a17 --- /dev/null +++ b/policy/aggregation_type_test.go @@ -0,0 +1,100 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAggregationTypeIsValid(t *testing.T) { + require.True(t, P9999.IsValid()) + require.False(t, AggregationType(int(P9999)+1).IsValid()) +} + +func TestAggregationTypesIsDefault(t *testing.T) { + require.True(t, DefaultAggregationTypes.IsDefault()) + + require.False(t, AggregationTypes{Upper}.IsDefault()) +} + +func TestCompressedAggregationTypesIsDefault(t *testing.T) { + var id AggregationID + require.True(t, id.IsDefault()) + + id[0] = 8 + require.False(t, id.IsDefault()) + + id[0] = 0 + require.True(t, id.IsDefault()) +} + +func TestParseParseAggregationTypes(t *testing.T) { + inputs := []struct { + str string + expected AggregationTypes + }{ + { + str: "Lower", + expected: AggregationTypes{Lower}, + }, + { + str: "Lower,Upper", + expected: AggregationTypes{Lower, Upper}, + }, + } + for _, input := range inputs { + res, err := ParseAggregationTypes(input.str) + require.NoError(t, err) + require.Equal(t, input.expected, res) + } +} + +func TestCompressedAggregationTypesMerge(t *testing.T) { + testcases := []struct { + input AggregationID + other AggregationID + result AggregationID + merged bool + }{ + {DefaultAggregationID, DefaultAggregationID, DefaultAggregationID, false}, + {mustCompress(Mean), DefaultAggregationID, mustCompress(Mean), false}, + {DefaultAggregationID, mustCompress(Mean), mustCompress(Mean), true}, + {mustCompress(Lower), mustCompress(Upper), mustCompress(Lower, 
Upper), true}, + {mustCompress(Lower), mustCompress(Lower, Upper), mustCompress(Lower, Upper), true}, + {mustCompress(Lower, Upper), mustCompress(Lower), mustCompress(Lower, Upper), false}, + } + + for _, test := range testcases { + res, merged := test.input.Merge(test.other) + require.Equal(t, test.result, res) + require.Equal(t, test.merged, merged) + } +} + +func mustCompress(aggTypes ...AggregationType) AggregationID { + res, err := NewAggregationTypeCompressor().Compress(aggTypes) + if err != nil { + panic(err.Error()) + } + return res +} diff --git a/policy/aggregationtype_string.go b/policy/aggregationtype_string.go new file mode 100644 index 0000000..227e879 --- /dev/null +++ b/policy/aggregationtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type AggregationType"; DO NOT EDIT + +package policy + +import "fmt" + +const _AggregationType_name = "UnknownLastLowerUpperMeanMedianCountSumSumSqStdevP50P95P99P999P9999" + +var _AggregationType_index = [...]uint8{0, 7, 11, 16, 21, 25, 31, 36, 39, 44, 49, 52, 55, 58, 62, 67} + +func (i AggregationType) String() string { + if i < 0 || i >= AggregationType(len(_AggregationType_index)-1) { + return fmt.Sprintf("AggregationType(%d)", i) + } + return _AggregationType_name[_AggregationType_index[i]:_AggregationType_index[i+1]] +} diff --git a/policy/policy.go b/policy/policy.go index 8601b41..f6b78a6 100644 --- a/policy/policy.go +++ b/policy/policy.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,90 +21,63 @@ package policy import ( - "bytes" "errors" - "fmt" "strings" - "time" "github.com/m3db/m3metrics/generated/proto/schema" - "github.com/m3db/m3x/time" ) const ( - // InitPolicyVersion is the version of an uninitialized policy. 
- InitPolicyVersion = -1 - - // DefaultPolicyVersion is the version for the default policy. - DefaultPolicyVersion = 0 - - resolutionRetentionSeparator = ":" + policyAggregationTypeSeparator = "|" ) var ( - // EmptyPolicy represents an empty policy. - EmptyPolicy Policy - - // DefaultStagedPolicies represents a default staged policies. - DefaultStagedPolicies StagedPolicies - - // DefaultPoliciesList represents a default policies list. - DefaultPoliciesList = PoliciesList{DefaultStagedPolicies} + // DefaultPolicy represents a default policy. + DefaultPolicy Policy errNilPolicySchema = errors.New("nil policy schema") errInvalidPolicyString = errors.New("invalid policy string") ) -// Policy represents the resolution and retention period metric datapoints -// are stored at. +// Policy contains a policy and a list of custom aggregation types. type Policy struct { - resolution Resolution - retention Retention + sp StoragePolicy + aggID AggregationID } -// NewPolicy creates a new policy given a resolution window size and retention. -func NewPolicy(window time.Duration, precision xtime.Unit, retention time.Duration) Policy { - return Policy{ - resolution: Resolution{ - Window: window, - Precision: precision, - }, - retention: Retention(retention), - } +// NewPolicy creates a policy. +func NewPolicy(sp StoragePolicy, aggTypes AggregationID) Policy { + return Policy{sp: sp, aggID: aggTypes} } // NewPolicyFromSchema creates a new policy from a schema policy. 
func NewPolicyFromSchema(p *schema.Policy) (Policy, error) { if p == nil { - return EmptyPolicy, errNilPolicySchema + return DefaultPolicy, errNilPolicySchema } - precision := time.Duration(p.Resolution.Precision) - unit, err := xtime.UnitFromDuration(precision) + + policy, err := NewStoragePolicyFromSchema(p.StoragePolicy) if err != nil { - return EmptyPolicy, err + return DefaultPolicy, err } - return Policy{ - resolution: Resolution{ - Window: time.Duration(p.Resolution.WindowSize), - Precision: unit, - }, - retention: Retention(p.Retention.Period), - }, nil -} -// String is the string representation of a policy. -func (p Policy) String() string { - return fmt.Sprintf("%s%s%s", p.resolution.String(), resolutionRetentionSeparator, p.retention.String()) + aggID, err := NewAggregationIDFromSchema(p.AggregationTypes) + if err != nil { + return DefaultPolicy, err + } + + return NewPolicy(policy, aggID), nil + } -// Resolution returns the resolution of the policy. -func (p Policy) Resolution() Resolution { - return p.resolution +// StoragePolicy return the storage policy. +func (p Policy) StoragePolicy() StoragePolicy { + return p.sp } -// Retention return the retention of the policy. -func (p Policy) Retention() Retention { - return p.retention +// AggregationID return the AggregationID. +func (p Policy) AggregationID() AggregationID { + return p.aggID } // UnmarshalYAML unmarshals a policy value from a string. @@ -113,6 +86,7 @@ func (p *Policy) UnmarshalYAML(unmarshal func(interface{}) error) error { if err := unmarshal(&str); err != nil { return err } + parsed, err := ParsePolicy(str) if err != nil { return err @@ -121,25 +95,44 @@ func (p *Policy) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -// ParsePolicy parses a policy in the form of resolution:retention. +// ParsePolicy parses a policy in the form of resolution:retention|aggregationTypes. 
func ParsePolicy(str string) (Policy, error) { - parts := strings.Split(str, resolutionRetentionSeparator) - if len(parts) != 2 { - return EmptyPolicy, errInvalidPolicyString + parts := strings.Split(str, policyAggregationTypeSeparator) + l := len(parts) + if l > 2 { + return DefaultPolicy, errInvalidPolicyString } - resolution, err := ParseResolution(parts[0]) + + p, err := ParseStoragePolicy(parts[0]) if err != nil { - return EmptyPolicy, err + return DefaultPolicy, err } - retentionDuration, err := xtime.ParseExtendedDuration(parts[1]) - if err != nil { - return EmptyPolicy, err + + var id = DefaultAggregationID + if l == 2 { + aggTypes, err := ParseAggregationTypes(parts[1]) + if err != nil { + return DefaultPolicy, err + } + + id, err = NewAggregationTypeCompressor().Compress(aggTypes) + if err != nil { + return DefaultPolicy, err + } } - retention := Retention(retentionDuration) - return Policy{resolution: resolution, retention: retention}, nil + + return NewPolicy(p, id), nil } -// NewPoliciesFromSchema creates multiple new policues from given schema policies. +// String is the string representation of a policy. +func (p Policy) String() string { + if p.aggID.IsDefault() { + return p.sp.String() + } + return p.sp.String() + policyAggregationTypeSeparator + p.aggID.String() +} + +// NewPoliciesFromSchema creates multiple new policies from given schema policies. 
func NewPoliciesFromSchema(policies []*schema.Policy) ([]Policy, error) { res := make([]Policy, 0, len(policies)) for _, p := range policies { @@ -162,98 +155,38 @@ func (pr ByResolutionAsc) Len() int { return len(pr) } func (pr ByResolutionAsc) Swap(i, j int) { pr[i], pr[j] = pr[j], pr[i] } func (pr ByResolutionAsc) Less(i, j int) bool { - w1, w2 := pr[i].Resolution().Window, pr[j].Resolution().Window - if w1 < w2 { + up1, up2 := pr[i], pr[j] + p1, p2 := up1.StoragePolicy(), up2.StoragePolicy() + rw1, rw2 := p1.Resolution().Window, p2.Resolution().Window + if rw1 < rw2 { return true } - if w1 > w2 { + if rw1 > rw2 { return false } - r1, r2 := pr[i].Retention(), pr[j].Retention() + r1, r2 := p1.Retention(), p2.Retention() if r1 > r2 { return true } if r1 < r2 { return false } - // NB(xichen): compare precision to ensure a deterministic ordering. - return pr[i].Resolution().Precision < pr[i].Resolution().Precision -} - -// StagedPolicies represent a list of policies at a specified version. -type StagedPolicies struct { - // Cutover is when the policies take effect. - CutoverNanos int64 - - // Tombstoned determines whether the associated (rollup) metric has been tombstoned. - Tombstoned bool - - // policies represent the list of policies. - policies []Policy -} - -// NewStagedPolicies create a new staged policies. -func NewStagedPolicies(cutoverNanos int64, tombstoned bool, policies []Policy) StagedPolicies { - return StagedPolicies{CutoverNanos: cutoverNanos, Tombstoned: tombstoned, policies: policies} -} - -// Reset resets the staged policies. -func (p *StagedPolicies) Reset() { *p = DefaultStagedPolicies } - -// IsDefault returns whether this is a default staged policies. -func (p StagedPolicies) IsDefault() bool { - return p.CutoverNanos == 0 && !p.Tombstoned && p.hasDefaultPolicies() -} - -// Policies returns the policies and whether the policies are the default policies. 
-func (p StagedPolicies) Policies() ([]Policy, bool) { - return p.policies, p.hasDefaultPolicies() -} - -// SamePolicies returns whether two staged policies have the same policy list, -// assuming the policies are sorted in the same order. -func (p StagedPolicies) SamePolicies(other StagedPolicies) bool { - currPolicies, currIsDefault := p.Policies() - otherPolicies, otherIsDefault := other.Policies() - if currIsDefault && otherIsDefault { + rp1, rp2 := p1.Resolution().Precision, p2.Resolution().Precision + if rp1 < rp2 { return true } - if currIsDefault || otherIsDefault { - return false - } - if len(currPolicies) != len(otherPolicies) { + if rp1 > rp2 { return false } - for i := 0; i < len(currPolicies); i++ { - if currPolicies[i] != otherPolicies[i] { + at1, at2 := up1.AggregationID(), up2.AggregationID() + for k := 0; k < AggregationIDLen; k++ { + if at1[k] < at2[k] { + return true + } + if at1[k] > at2[k] { return false } } + // If everything equals, prefer the first one return true } - -// String is the representation of staged policies. -func (p StagedPolicies) String() string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("{cutover:%s,tombstoned:%v,policies:[", time.Unix(0, p.CutoverNanos).String(), p.Tombstoned)) - for i := range p.policies { - buf.WriteString(p.policies[i].String()) - if i < len(p.policies)-1 { - buf.WriteString(",") - } - } - buf.WriteString("]}") - return buf.String() -} - -func (p StagedPolicies) hasDefaultPolicies() bool { - return len(p.policies) == 0 -} - -// PoliciesList is a list of staged policies. -type PoliciesList []StagedPolicies - -// IsDefault determines whether this is a default policies list. 
-func (l PoliciesList) IsDefault() bool { - return len(l) == 1 && l[0].IsDefault() -} diff --git a/policy/policy_benchmark_test.go b/policy/policy_benchmark_test.go index 55eb86a..3aeadae 100644 --- a/policy/policy_benchmark_test.go +++ b/policy/policy_benchmark_test.go @@ -30,8 +30,8 @@ import ( var ( testNowNanos = time.Now().UnixNano() testPolicies = []Policy{ - NewPolicy(10*time.Second, xtime.Second, 2*24*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 30*24*time.Hour), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 30*24*time.Hour), DefaultAggregationID), } ) diff --git a/policy/policy_test.go b/policy/policy_test.go index a625284..6b1f209 100644 --- a/policy/policy_test.go +++ b/policy/policy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/m3db/m3metrics/generated/proto/schema" "github.com/m3db/m3x/time" "github.com/stretchr/testify/require" @@ -36,75 +37,15 @@ func TestPolicyString(t *testing.T) { p Policy expected string }{ - {p: NewPolicy(10*time.Second, xtime.Second, time.Hour), expected: "10s@1s:1h0m0s"}, - {p: NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), expected: "1m0s@1m:12h0m0s"}, + {p: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), DefaultAggregationID), expected: "10s@1s:1h0m0s"}, + {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), mustCompress(Mean, P999)), expected: "1m0s@1m:12h0m0s|Mean,P999"}, + {p: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), mustCompress(Mean)), expected: "1m0s@1m:12h0m0s|Mean"}, } for _, input := range inputs { require.Equal(t, input.expected, 
input.p.String()) } } -func TestParsePolicy(t *testing.T) { - inputs := []struct { - str string - expected Policy - }{ - { - str: "1s:1h", - expected: NewPolicy(time.Second, xtime.Second, time.Hour), - }, - { - str: "10s:1d", - expected: NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), - }, - { - str: "60s:24h", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - }, - { - str: "1m:1d", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - }, - { - str: "1s@1s:1h", - expected: NewPolicy(time.Second, xtime.Second, time.Hour), - }, - { - str: "10s@1s:1d", - expected: NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), - }, - { - str: "60s@1s:24h", - expected: NewPolicy(time.Minute, xtime.Second, 24*time.Hour), - }, - { - str: "1m@1m:1d", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - }, - } - for _, input := range inputs { - res, err := ParsePolicy(input.str) - require.NoError(t, err) - require.Equal(t, input.expected, res) - } -} - -func TestParsePolicyErrors(t *testing.T) { - inputs := []string{ - "1s:1s:1s", - "0s:1d", - "10seconds:1s", - "10seconds@1s:1d", - "10s@2s:1d", - "0.1s@1s:1d", - "10s@2minutes:2d", - } - for _, input := range inputs { - _, err := ParsePolicy(input) - require.Error(t, err) - } -} - func TestPolicyUnmarshalYAML(t *testing.T) { inputs := []struct { str string @@ -112,35 +53,23 @@ func TestPolicyUnmarshalYAML(t *testing.T) { }{ { str: "1s:1h", - expected: NewPolicy(time.Second, xtime.Second, time.Hour), - }, - { - str: "10s:1d", - expected: NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), - }, - { - str: "60s:24h", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), + expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), DefaultAggregationID), }, { - str: "1m:1d", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), + str: "10s:1d|Mean", + expected: NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), 
mustCompress(Mean)), }, { - str: "1s@1s:1h", - expected: NewPolicy(time.Second, xtime.Second, time.Hour), + str: "60s:24h|Mean,Count", + expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), mustCompress(Mean, Count)), }, { - str: "10s@1s:1d", - expected: NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + str: "1m:1d|Count,Mean", + expected: NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), mustCompress(Mean, Count)), }, { - str: "60s@1s:24h", - expected: NewPolicy(time.Minute, xtime.Second, 24*time.Hour), - }, - { - str: "1m@1m:1d", - expected: NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), + str: "1s@1s:1h|P999,P9999", + expected: NewPolicy(NewStoragePolicy(time.Second, xtime.Second, time.Hour), mustCompress(P999, P9999)), }, } for _, input := range inputs { @@ -152,13 +81,14 @@ func TestPolicyUnmarshalYAML(t *testing.T) { func TestPolicyUnmarshalYAMLErrors(t *testing.T) { inputs := []string{ - "1s:1s:1s", - "0s:1d", - "10seconds:1s", - "10seconds@1s:1d", - "10s@2s:1d", - "0.1s@1s:1d", - "10s@2minutes:2d", + "|", + "|Mean", + "1s:1h|", + "1s:1h||", + "1s:1h|P99|", + "1s:1h|P", + "1s:1h|Meann", + "1s:1h|Mean,", } for _, input := range inputs { var p Policy @@ -166,169 +96,67 @@ func TestPolicyUnmarshalYAMLErrors(t *testing.T) { } } -func TestPoliciesByResolutionAsc(t *testing.T) { - inputs := []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), - NewPolicy(time.Minute, xtime.Minute, time.Hour), - NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - NewPolicy(10*time.Minute, xtime.Minute, 48*time.Hour), - } - expected := []Policy{inputs[2], inputs[0], inputs[1], inputs[5], inputs[4], inputs[3], inputs[6]} - sort.Sort(ByResolutionAsc(inputs)) - require.Equal(t, expected, inputs) -} - -func TestStagedPoliciesHasDefaultPolicies(t 
*testing.T) { - sp := NewStagedPolicies(testNowNanos, true, nil) - require.Equal(t, testNowNanos, sp.CutoverNanos) - _, isDefault := sp.Policies() - require.True(t, isDefault) -} - -func TestStagedPoliciesHasCustomPolicies(t *testing.T) { - policies := []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - } - sp := NewStagedPolicies(testNowNanos, false, policies) - require.Equal(t, testNowNanos, sp.CutoverNanos) - actual, isDefault := sp.Policies() - require.False(t, isDefault) - require.Equal(t, policies, actual) -} - -func TestStagedPoliciesSamePoliciesDefaultPolicies(t *testing.T) { - inputs := []struct { - sp [2]StagedPolicies - expected bool - }{ - { - sp: [2]StagedPolicies{ - NewStagedPolicies(0, false, nil), - NewStagedPolicies(0, true, []Policy{}), - }, - expected: true, - }, - { - sp: [2]StagedPolicies{ - NewStagedPolicies(0, false, nil), - NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), +func TestNewPoliciesFromSchema(t *testing.T) { + input := []*schema.Policy{ + &schema.Policy{ + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, - expected: false, - }, - { - sp: [2]StagedPolicies{ - NewStagedPolicies(1000, false, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), - NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_MEAN, + schema.AggregationType_P999, }, - expected: true, }, - { - sp: [2]StagedPolicies{ - NewStagedPolicies(1000, false, []Policy{ - 
NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), - NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - NewPolicy(10*time.Minute, xtime.Minute, 24*time.Hour), - }), + &schema.Policy{ + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(240 * time.Hour), + }, }, - expected: false, - }, - { - sp: [2]StagedPolicies{ - NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - NewPolicy(10*time.Minute, xtime.Minute, 24*time.Hour), - }), - NewStagedPolicies(1000, false, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_MEAN, + schema.AggregationType_P9999, }, - expected: false, }, } - for _, input := range inputs { - require.Equal(t, input.expected, input.sp[0].SamePolicies(input.sp[1])) - } -} -func TestStagedPoliciesIsEmpty(t *testing.T) { - inputs := []struct { - sp StagedPolicies - expected bool - }{ - { - sp: NewStagedPolicies(0, false, nil), - expected: true, - }, - { - sp: NewStagedPolicies(0, false, []Policy{}), - expected: true, - }, - { - sp: NewStagedPolicies(100, false, nil), - expected: false, - }, - { - sp: NewStagedPolicies(0, true, nil), - expected: false, - }, - { - sp: NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - }), - expected: false, - }, - } - for _, input := range inputs { - require.Equal(t, input.expected, input.sp.IsDefault()) - } + res, err := NewPoliciesFromSchema(input) + require.NoError(t, err) + 
require.Equal(t, []Policy{ + Policy{ + sp: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + aggID: mustCompress(Mean, P999), + }, + Policy{ + sp: NewStoragePolicy(time.Minute, xtime.Minute, 240*time.Hour), + aggID: mustCompress(Mean, P9999), + }, + }, res) } -func TestPoliciesListIsDefault(t *testing.T) { - inputs := []struct { - pl PoliciesList - expected bool - }{ - { - pl: DefaultPoliciesList, - expected: true, - }, - { - pl: []StagedPolicies{}, - expected: false, - }, - { - pl: []StagedPolicies{NewStagedPolicies(0, true, []Policy{ - NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - NewPolicy(time.Minute, xtime.Minute, 12*time.Hour), - })}, - expected: false, - }, - { - pl: []StagedPolicies{DefaultStagedPolicies, DefaultStagedPolicies}, - expected: false, - }, - } - for _, input := range inputs { - require.Equal(t, input.expected, input.pl.IsDefault()) - } +func TestPoliciesByResolutionAsc(t *testing.T) { + inputs := []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), AggregationID{100}), + NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), AggregationID{100}), + } + expected := []Policy{inputs[2], inputs[0], inputs[1], inputs[5], inputs[4], inputs[3], inputs[7], inputs[6], inputs[8]} + sort.Sort(ByResolutionAsc(inputs)) + 
require.Equal(t, expected, inputs) } diff --git a/policy/staged_policy.go b/policy/staged_policy.go new file mode 100644 index 0000000..eef1cab --- /dev/null +++ b/policy/staged_policy.go @@ -0,0 +1,129 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "bytes" + "fmt" + "time" +) + +var ( + // DefaultStagedPolicies represents a default staged policies. + DefaultStagedPolicies StagedPolicies + + // DefaultPoliciesList represents a default policies list. + DefaultPoliciesList = PoliciesList{DefaultStagedPolicies} +) + +// StagedPolicies represent a list of policies at a specified version. +type StagedPolicies struct { + // Cutover is when the policies take effect. + CutoverNanos int64 + + // Tombstoned determines whether the associated (rollup) metric has been tombstoned. + Tombstoned bool + + // policies represent the list of policies. 
+	policies []Policy
+}
+
+// NewStagedPolicies creates a new staged policies with the given cutover, tombstone flag and policy list.
+func NewStagedPolicies(cutoverNanos int64, tombstoned bool, policies []Policy) StagedPolicies {
+	return StagedPolicies{CutoverNanos: cutoverNanos, Tombstoned: tombstoned, policies: policies}
+}
+
+// Reset resets the staged policies.
+func (p *StagedPolicies) Reset() { *p = DefaultStagedPolicies }
+
+// IsDefault returns whether this is a default staged policies.
+func (p StagedPolicies) IsDefault() bool {
+	return p.CutoverNanos == 0 && !p.Tombstoned && p.hasDefaultPolicies()
+}
+
+// Policies returns the policies and whether the policies are the default policies.
+func (p StagedPolicies) Policies() ([]Policy, bool) {
+	return p.policies, p.hasDefaultPolicies()
+}
+
+// SamePolicies returns whether two staged policies have the same policy list,
+// assuming the policies are sorted in the same order.
+func (p StagedPolicies) SamePolicies(other StagedPolicies) bool {
+	currPolicies, currIsDefault := p.Policies()
+	otherPolicies, otherIsDefault := other.Policies()
+	if currIsDefault && otherIsDefault { // both default => trivially the same
+		return true
+	}
+	if currIsDefault || otherIsDefault { // exactly one default => different
+		return false
+	}
+	if len(currPolicies) != len(otherPolicies) {
+		return false
+	}
+	for i := 0; i < len(currPolicies); i++ {
+		if currPolicies[i] != otherPolicies[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// String returns the string representation of the staged policies.
+func (p StagedPolicies) String() string {
+	var buf bytes.Buffer
+	buf.WriteString(fmt.Sprintf("{cutover:%s,tombstoned:%v,policies:[", time.Unix(0, p.CutoverNanos).String(), p.Tombstoned))
+	for i := range p.policies {
+		buf.WriteString(p.policies[i].String())
+		if i < len(p.policies)-1 {
+			buf.WriteString(",")
+		}
+	}
+	buf.WriteString("]}")
+	return buf.String()
+}
+
+// hasDefaultPolicies returns whether the policy list is empty, i.e. default.
+func (p StagedPolicies) hasDefaultPolicies() bool {
+	return len(p.policies) == 0
+}
+
+// PoliciesList is a list of staged policies.
+type PoliciesList []StagedPolicies
+
+// IsDefault determines whether this is a default policies list.
+func (l PoliciesList) IsDefault() bool {
+	return len(l) == 1 && l[0].IsDefault()
+}
+
+// WithDefaultAggregation returns the list with every custom aggregation ID reset to the default (mutates l in place).
+func (l PoliciesList) WithDefaultAggregation() PoliciesList {
+	for _, sp := range l {
+		pl, ok := sp.Policies()
+		if ok { // default policies already carry the default aggregation ID
+			continue
+		}
+		for j := range pl {
+			if !pl[j].aggID.IsDefault() {
+				sp.policies[j] = NewPolicy(pl[j].sp, DefaultAggregationID) // sp.policies aliases l's element, so this writes through to l
+			}
+		}
+	}
+	return l
+}
diff --git a/policy/staged_policy_test.go b/policy/staged_policy_test.go
new file mode 100644
index 0000000..bb1530a
--- /dev/null
+++ b/policy/staged_policy_test.go
@@ -0,0 +1,236 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+ +package policy + +import ( + "testing" + "time" + + "github.com/m3db/m3x/time" + "github.com/stretchr/testify/require" +) + +func TestStagedPoliciesHasDefaultPolicies(t *testing.T) { + sp := NewStagedPolicies(testNowNanos, true, nil) + require.Equal(t, testNowNanos, sp.CutoverNanos) + _, isDefault := sp.Policies() + require.True(t, isDefault) +} + +func TestStagedPoliciesHasCustomPolicies(t *testing.T) { + policies := []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + } + sp := NewStagedPolicies(testNowNanos, false, policies) + require.Equal(t, testNowNanos, sp.CutoverNanos) + actual, isDefault := sp.Policies() + require.False(t, isDefault) + require.Equal(t, policies, actual) +} + +func TestStagedPoliciesSamePoliciesDefaultPolicies(t *testing.T) { + inputs := []struct { + sp [2]StagedPolicies + expected bool + }{ + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, false, nil), + NewStagedPolicies(0, true, []Policy{}), + }, + expected: true, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Lower, Upper)), + }), + NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Upper, Lower)), + }), + }, + expected: true, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Upper)), + }), + NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Last)), + }), + }, + expected: false, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Upper)), + }), + 
NewStagedPolicies(0, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), mustCompress(Upper, Lower)), + }), + }, + expected: false, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, false, nil), + NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + }, + expected: false, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(1000, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + }, + expected: true, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(1000, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), DefaultAggregationID), + }), + }, + expected: false, + }, + { + sp: [2]StagedPolicies{ + NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour), 
DefaultAggregationID), + }), + NewStagedPolicies(1000, false, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + }, + expected: false, + }, + } + for _, input := range inputs { + require.Equal(t, input.expected, input.sp[0].SamePolicies(input.sp[1])) + } +} + +func TestStagedPoliciesIsEmpty(t *testing.T) { + inputs := []struct { + sp StagedPolicies + expected bool + }{ + { + sp: NewStagedPolicies(0, false, nil), + expected: true, + }, + { + sp: NewStagedPolicies(0, false, []Policy{}), + expected: true, + }, + { + sp: NewStagedPolicies(100, false, nil), + expected: false, + }, + { + sp: NewStagedPolicies(0, true, nil), + expected: false, + }, + { + sp: NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + }), + expected: false, + }, + } + for _, input := range inputs { + require.Equal(t, input.expected, input.sp.IsDefault()) + } +} + +func TestPoliciesListIsDefault(t *testing.T) { + inputs := []struct { + pl PoliciesList + expected bool + }{ + { + pl: DefaultPoliciesList, + expected: true, + }, + { + pl: []StagedPolicies{}, + expected: false, + }, + { + pl: []StagedPolicies{NewStagedPolicies(0, true, []Policy{ + NewPolicy(NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), DefaultAggregationID), + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), DefaultAggregationID), + })}, + expected: false, + }, + { + pl: []StagedPolicies{DefaultStagedPolicies, DefaultStagedPolicies}, + expected: false, + }, + } + for _, input := range inputs { + require.Equal(t, input.expected, input.pl.IsDefault()) + } +} + +func TestPoliciesListWithDefaultAggregation(t *testing.T) { + pl := PoliciesList( + []StagedPolicies{ + 
NewStagedPolicies(100, true, []Policy{ + NewPolicy(NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), mustCompress(Upper)), + }), + }, + ) + + pl2 := pl.WithDefaultAggregation() + + require.Equal(t, pl, pl2) + require.Equal(t, 1, len(pl2)) + + require.Equal(t, int64(100), pl2[0].CutoverNanos) + require.Equal(t, true, pl2[0].Tombstoned) + for i := range pl2[0].policies { + require.Equal(t, NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), pl2[0].policies[i].sp) + require.Equal(t, DefaultAggregationID, pl2[0].policies[i].aggID) + } +} diff --git a/policy/storage_policy.go b/policy/storage_policy.go new file mode 100644 index 0000000..9cc8bad --- /dev/null +++ b/policy/storage_policy.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package policy + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/m3db/m3metrics/generated/proto/schema" + "github.com/m3db/m3x/time" +) + +const ( + resolutionRetentionSeparator = ":" +) + +var ( + // DefaultStoragePolicy represents a default storage policy. + DefaultStoragePolicy StoragePolicy + + errNilStoragePolicySchema = errors.New("nil storage policy schema") + errInvalidStoragePolicyString = errors.New("invalid storage policy string") +) + +// StoragePolicy represents the resolution and retention period metric datapoints +// are stored at. +type StoragePolicy struct { + resolution Resolution + retention Retention +} + +// NewStoragePolicy creates a new storage policy given a resolution window size and retention. +func NewStoragePolicy(window time.Duration, precision xtime.Unit, retention time.Duration) StoragePolicy { + return StoragePolicy{ + resolution: Resolution{ + Window: window, + Precision: precision, + }, + retention: Retention(retention), + } +} + +// NewStoragePolicyFromSchema creates a new unaggregated policy from a schema policy. +func NewStoragePolicyFromSchema(p *schema.StoragePolicy) (StoragePolicy, error) { + if p == nil { + return DefaultStoragePolicy, errNilStoragePolicySchema + } + precision := time.Duration(p.Resolution.Precision) + unit, err := xtime.UnitFromDuration(precision) + if err != nil { + return DefaultStoragePolicy, err + } + + return NewStoragePolicy(time.Duration(p.Resolution.WindowSize), unit, time.Duration(p.Retention.Period)), nil + +} + +// String is the string representation of a policy. +func (p StoragePolicy) String() string { + return fmt.Sprintf("%s%s%s", p.resolution.String(), resolutionRetentionSeparator, p.retention.String()) +} + +// Resolution returns the resolution of the policy. +func (p StoragePolicy) Resolution() Resolution { + return p.resolution +} + +// Retention return the retention of the policy. 
+func (p StoragePolicy) Retention() Retention { + return p.retention +} + +// UnmarshalYAML unmarshals a policy value from a string. +func (p *StoragePolicy) UnmarshalYAML(unmarshal func(interface{}) error) error { + var str string + if err := unmarshal(&str); err != nil { + return err + } + parsed, err := ParseStoragePolicy(str) + if err != nil { + return err + } + *p = parsed + return nil +} + +// ParseStoragePolicy parses a policy in the form of resolution:retention. +func ParseStoragePolicy(str string) (StoragePolicy, error) { + parts := strings.Split(str, resolutionRetentionSeparator) + if len(parts) != 2 { + return DefaultStoragePolicy, errInvalidStoragePolicyString + } + resolution, err := ParseResolution(parts[0]) + if err != nil { + return DefaultStoragePolicy, err + } + retentionDuration, err := xtime.ParseExtendedDuration(parts[1]) + if err != nil { + return DefaultStoragePolicy, err + } + retention := Retention(retentionDuration) + return StoragePolicy{resolution: resolution, retention: retention}, nil +} diff --git a/policy/storage_policy_test.go b/policy/storage_policy_test.go new file mode 100644 index 0000000..0faaf23 --- /dev/null +++ b/policy/storage_policy_test.go @@ -0,0 +1,205 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package policy + +import ( + "testing" + "time" + + "github.com/m3db/m3metrics/generated/proto/schema" + "github.com/m3db/m3x/time" + + "github.com/stretchr/testify/require" + yaml "gopkg.in/yaml.v2" +) + +func TestStoragePolicyString(t *testing.T) { + inputs := []struct { + p StoragePolicy + expected string + }{ + {p: NewStoragePolicy(10*time.Second, xtime.Second, time.Hour), expected: "10s@1s:1h0m0s"}, + {p: NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour), expected: "1m0s@1m:12h0m0s"}, + } + for _, input := range inputs { + require.Equal(t, input.expected, input.p.String()) + } +} + +func TestParseStoragePolicy(t *testing.T) { + inputs := []struct { + str string + expected StoragePolicy + }{ + { + str: "1s:1h", + expected: NewStoragePolicy(time.Second, xtime.Second, time.Hour), + }, + { + str: "10s:1d", + expected: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + }, + { + str: "60s:24h", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + { + str: "1m:1d", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + { + str: "1s@1s:1h", + expected: NewStoragePolicy(time.Second, xtime.Second, time.Hour), + }, + { + str: "10s@1s:1d", + expected: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + }, + { + str: "60s@1s:24h", + expected: NewStoragePolicy(time.Minute, xtime.Second, 24*time.Hour), + }, + { + str: "1m@1m:1d", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + } 
+ for _, input := range inputs { + res, err := ParseStoragePolicy(input.str) + require.NoError(t, err) + require.Equal(t, input.expected, res) + } +} + +func TestParseStoragePolicyErrors(t *testing.T) { + inputs := []string{ + "1s:1s:1s", + "0s:1d", + "10seconds:1s", + "10seconds@1s:1d", + "10s@2s:1d", + "0.1s@1s:1d", + "10s@2minutes:2d", + } + for _, input := range inputs { + _, err := ParseStoragePolicy(input) + require.Error(t, err) + } +} + +func TestStoragePolicyUnmarshalYAML(t *testing.T) { + inputs := []struct { + str string + expected StoragePolicy + }{ + { + str: "1s:1h", + expected: NewStoragePolicy(time.Second, xtime.Second, time.Hour), + }, + { + str: "10s:1d", + expected: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + }, + { + str: "60s:24h", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + { + str: "1m:1d", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + { + str: "1s@1s:1h", + expected: NewStoragePolicy(time.Second, xtime.Second, time.Hour), + }, + { + str: "10s@1s:1d", + expected: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + }, + { + str: "60s@1s:24h", + expected: NewStoragePolicy(time.Minute, xtime.Second, 24*time.Hour), + }, + { + str: "1m@1m:1d", + expected: NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), + }, + } + for _, input := range inputs { + var p StoragePolicy + require.NoError(t, yaml.Unmarshal([]byte(input.str), &p)) + require.Equal(t, input.expected, p) + } +} + +func TestStoragePolicyUnmarshalYAMLErrors(t *testing.T) { + inputs := []string{ + "1s:1s:1s", + "0s:1d", + "10seconds:1s", + "10seconds@1s:1d", + "10s@2s:1d", + "0.1s@1s:1d", + "10s@2minutes:2d", + } + for _, input := range inputs { + var p StoragePolicy + require.Error(t, yaml.Unmarshal([]byte(input), &p)) + } +} + +func TestNewStoragePolicyFromSchema(t *testing.T) { + inputs := []struct { + s *schema.StoragePolicy + p StoragePolicy + }{ + { + s: &schema.StoragePolicy{ + 
Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, + }, + p: NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), + }, + { + s: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(240 * time.Hour), + }, + }, + p: NewStoragePolicy(time.Minute, xtime.Minute, 240*time.Hour), + }, + } + + for _, input := range inputs { + res, err := NewStoragePolicyFromSchema(input.s) + require.NoError(t, err) + require.Equal(t, input.p, res) + } +} diff --git a/protocol/msgpack/aggregated_encoder.go b/protocol/msgpack/aggregated_encoder.go index 9820455..a70bc88 100644 --- a/protocol/msgpack/aggregated_encoder.go +++ b/protocol/msgpack/aggregated_encoder.go @@ -25,7 +25,7 @@ import ( "github.com/m3db/m3metrics/policy" ) -type encodeRawMetricWithPolicyFn func(data []byte, p policy.Policy) +type encodeRawMetricWithPolicyFn func(data []byte, p policy.StoragePolicy) type encodeRawMetricFn func(data []byte) type encodeMetricAsRawFn func(m aggregated.Metric) []byte type encodeChunkedMetricAsRawFn func(m aggregated.ChunkedMetric) []byte @@ -70,7 +70,7 @@ func (enc *aggregatedEncoder) EncodeMetricWithPolicy(mp aggregated.MetricWithPol } enc.encodeRootObjectFn(rawMetricWithPolicyType) data := enc.encodeMetricAsRawFn(mp.Metric) - enc.encodeRawMetricWithPolicyFn(data, mp.Policy) + enc.encodeRawMetricWithPolicyFn(data, mp.StoragePolicy) return enc.err() } @@ -80,7 +80,7 @@ func (enc *aggregatedEncoder) EncodeChunkedMetricWithPolicy(cmp aggregated.Chunk } enc.encodeRootObjectFn(rawMetricWithPolicyType) data := enc.encodeChunkedMetricAsRawFn(cmp.ChunkedMetric) - enc.encodeRawMetricWithPolicyFn(data, cmp.Policy) + enc.encodeRawMetricWithPolicyFn(data, cmp.StoragePolicy) return enc.err() } @@ -89,7 +89,7 @@ func (enc *aggregatedEncoder) 
EncodeRawMetricWithPolicy(rp aggregated.RawMetricW return err } enc.encodeRootObjectFn(rawMetricWithPolicyType) - enc.encodeRawMetricWithPolicyFn(rp.RawMetric.Bytes(), rp.Policy) + enc.encodeRawMetricWithPolicyFn(rp.RawMetric.Bytes(), rp.StoragePolicy) return enc.err() } @@ -122,10 +122,10 @@ func (enc *aggregatedEncoder) encodeMetricProlog() { enc.buf.encodeNumObjectFields(numFieldsForType(metricType)) } -func (enc *aggregatedEncoder) encodeRawMetricWithPolicy(data []byte, p policy.Policy) { +func (enc *aggregatedEncoder) encodeRawMetricWithPolicy(data []byte, p policy.StoragePolicy) { enc.encodeNumObjectFields(numFieldsForType(rawMetricWithPolicyType)) enc.encodeRawMetricFn(data) - enc.encodePolicy(p) + enc.encodeStoragePolicy(p) } func (enc *aggregatedEncoder) encodeRawMetric(data []byte) { diff --git a/protocol/msgpack/aggregated_encoder_test.go b/protocol/msgpack/aggregated_encoder_test.go index 36a7437..8ece687 100644 --- a/protocol/msgpack/aggregated_encoder_test.go +++ b/protocol/msgpack/aggregated_encoder_test.go @@ -35,7 +35,7 @@ func testCapturingAggregatedEncoder(t *testing.T) (AggregatedEncoder, *[]interfa return encoder, result } -func expectedResultsForRawMetricWithPolicy(t *testing.T, m aggregated.RawMetric, p policy.Policy) []interface{} { +func expectedResultsForRawMetricWithPolicy(t *testing.T, m aggregated.RawMetric, p policy.StoragePolicy) []interface{} { results := []interface{}{ numFieldsForType(rawMetricWithPolicyType), m.Bytes(), @@ -44,7 +44,7 @@ func expectedResultsForRawMetricWithPolicy(t *testing.T, m aggregated.RawMetric, return results } -func expectedResultsForAggregatedMetricWithPolicy(t *testing.T, m interface{}, p policy.Policy) []interface{} { +func expectedResultsForAggregatedMetricWithPolicy(t *testing.T, m interface{}, p policy.StoragePolicy) []interface{} { results := []interface{}{ int64(aggregatedVersion), numFieldsForType(rootObjectType), diff --git a/protocol/msgpack/aggregated_iterator.go 
b/protocol/msgpack/aggregated_iterator.go index 530ca07..2d08e92 100644 --- a/protocol/msgpack/aggregated_iterator.go +++ b/protocol/msgpack/aggregated_iterator.go @@ -36,7 +36,7 @@ type aggregatedIterator struct { iteratorPool AggregatedIteratorPool closed bool metric aggregated.RawMetric - policy policy.Policy + policy policy.StoragePolicy } // NewAggregatedIterator creates a new aggregated iterator. @@ -60,7 +60,7 @@ func (it *aggregatedIterator) Reset(reader io.Reader) { it.reset(reader) } -func (it *aggregatedIterator) Value() (aggregated.RawMetric, policy.Policy) { +func (it *aggregatedIterator) Value() (aggregated.RawMetric, policy.StoragePolicy) { return it.metric, it.policy } @@ -124,7 +124,7 @@ func (it *aggregatedIterator) decodeRawMetricWithPolicy() { return } it.metric.Reset(it.decodeRawMetric()) - it.policy = it.decodePolicy() + it.policy = it.decodeStoragePolicy() it.skip(numActualFields - numExpectedFields) } diff --git a/protocol/msgpack/aggregated_iterator_test.go b/protocol/msgpack/aggregated_iterator_test.go index a4c395b..9cc85bf 100644 --- a/protocol/msgpack/aggregated_iterator_test.go +++ b/protocol/msgpack/aggregated_iterator_test.go @@ -107,10 +107,10 @@ func TestAggregatedIteratorDecodeRawMetricMoreFieldsThanExpected(t *testing.T) { enc := testAggregatedEncoder(t).(*aggregatedEncoder) // Pretend we added an extra int field to the raw metric with policy object. 
- enc.encodeRawMetricWithPolicyFn = func(data []byte, p policy.Policy) { + enc.encodeRawMetricWithPolicyFn = func(data []byte, p policy.StoragePolicy) { enc.encodeNumObjectFields(numFieldsForType(rawMetricWithPolicyType) + 1) enc.encodeRawMetricFn(data) - enc.encodePolicy(p) + enc.encodeStoragePolicy(p) } testAggregatedEncode(t, enc, input.metric.(aggregated.Metric), input.policy) enc.encodeVarint(0) diff --git a/protocol/msgpack/aggregated_roundtrip_test.go b/protocol/msgpack/aggregated_roundtrip_test.go index 664b2dc..ac7195e 100644 --- a/protocol/msgpack/aggregated_roundtrip_test.go +++ b/protocol/msgpack/aggregated_roundtrip_test.go @@ -55,12 +55,12 @@ var ( TimeNanos: time.Now().UnixNano(), Value: 678.90, } - testPolicy = policy.NewPolicy(time.Second, xtime.Second, time.Hour) + testPolicy = policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour) ) type metricWithPolicy struct { metric interface{} - policy policy.Policy + policy policy.StoragePolicy } func testAggregatedEncoder(t *testing.T) AggregatedEncoder { @@ -71,22 +71,22 @@ func testAggregatedIterator(t *testing.T, reader io.Reader) AggregatedIterator { return NewAggregatedIterator(reader, NewAggregatedIteratorOptions()) } -func testAggregatedEncode(t *testing.T, encoder AggregatedEncoder, m interface{}, p policy.Policy) error { +func testAggregatedEncode(t *testing.T, encoder AggregatedEncoder, m interface{}, p policy.StoragePolicy) error { switch m := m.(type) { case aggregated.Metric: return encoder.EncodeMetricWithPolicy(aggregated.MetricWithPolicy{ - Metric: m, - Policy: p, + Metric: m, + StoragePolicy: p, }) case aggregated.ChunkedMetric: return encoder.EncodeChunkedMetricWithPolicy(aggregated.ChunkedMetricWithPolicy{ ChunkedMetric: m, - Policy: p, + StoragePolicy: p, }) case aggregated.RawMetric: return encoder.EncodeRawMetricWithPolicy(aggregated.RawMetricWithPolicy{ - RawMetric: m, - Policy: p, + RawMetric: m, + StoragePolicy: p, }) default: return fmt.Errorf("unrecognized metric type: 
%T", m) diff --git a/protocol/msgpack/base_encoder.go b/protocol/msgpack/base_encoder.go index f317268..64d411a 100644 --- a/protocol/msgpack/base_encoder.go +++ b/protocol/msgpack/base_encoder.go @@ -25,56 +25,61 @@ import ( "github.com/m3db/m3metrics/policy" ) -type encodePolicyFn func(p policy.Policy) type encodeVarintFn func(value int64) type encodeBoolFn func(value bool) type encodeFloat64Fn func(value float64) type encodeBytesFn func(value []byte) type encodeBytesLenFn func(value int) type encodeArrayLenFn func(value int) +type encodeStoragePolicyFn func(p policy.StoragePolicy) +type encodePolicyFn func(p policy.Policy) // baseEncoder is the base encoder that provides common encoding APIs. type baseEncoder struct { - bufEncoder BufferedEncoder - encodeErr error - encodePolicyFn encodePolicyFn - encodeVarintFn encodeVarintFn - encodeBoolFn encodeBoolFn - encodeFloat64Fn encodeFloat64Fn - encodeBytesFn encodeBytesFn - encodeBytesLenFn encodeBytesLenFn - encodeArrayLenFn encodeArrayLenFn + bufEncoder BufferedEncoder + encodeErr error + encodeVarintFn encodeVarintFn + encodeBoolFn encodeBoolFn + encodeFloat64Fn encodeFloat64Fn + encodeBytesFn encodeBytesFn + encodeBytesLenFn encodeBytesLenFn + encodeArrayLenFn encodeArrayLenFn + encodeStoragePolicyFn encodeStoragePolicyFn + encodePolicyFn encodePolicyFn } func newBaseEncoder(encoder BufferedEncoder) encoderBase { enc := &baseEncoder{bufEncoder: encoder} - enc.encodePolicyFn = enc.encodePolicyInternal enc.encodeVarintFn = enc.encodeVarintInternal enc.encodeBoolFn = enc.encodeBoolInternal enc.encodeFloat64Fn = enc.encodeFloat64Internal enc.encodeBytesFn = enc.encodeBytesInternal enc.encodeBytesLenFn = enc.encodeBytesLenInternal enc.encodeArrayLenFn = enc.encodeArrayLenInternal + enc.encodeStoragePolicyFn = enc.encodeStoragePolicyInternal + enc.encodePolicyFn = enc.encodePolicyInternal return enc } -func (enc *baseEncoder) encoder() BufferedEncoder { return enc.bufEncoder } -func (enc *baseEncoder) err() error { 
return enc.encodeErr } -func (enc *baseEncoder) resetData() { enc.bufEncoder.Reset() } -func (enc *baseEncoder) encodePolicy(p policy.Policy) { enc.encodePolicyFn(p) } -func (enc *baseEncoder) encodeVersion(version int) { enc.encodeVarint(int64(version)) } -func (enc *baseEncoder) encodeObjectType(objType objectType) { enc.encodeVarint(int64(objType)) } -func (enc *baseEncoder) encodeNumObjectFields(numFields int) { enc.encodeArrayLen(numFields) } -func (enc *baseEncoder) encodeRawID(id id.RawID) { enc.encodeBytes([]byte(id)) } -func (enc *baseEncoder) encodeVarint(value int64) { enc.encodeVarintFn(value) } -func (enc *baseEncoder) encodeBool(value bool) { enc.encodeBoolFn(value) } -func (enc *baseEncoder) encodeFloat64(value float64) { enc.encodeFloat64Fn(value) } -func (enc *baseEncoder) encodeBytes(value []byte) { enc.encodeBytesFn(value) } -func (enc *baseEncoder) encodeBytesLen(value int) { enc.encodeBytesLenFn(value) } -func (enc *baseEncoder) encodeArrayLen(value int) { enc.encodeArrayLenFn(value) } - +func (enc *baseEncoder) encoder() BufferedEncoder { return enc.bufEncoder } +func (enc *baseEncoder) err() error { return enc.encodeErr } +func (enc *baseEncoder) resetData() { enc.bufEncoder.Reset() } +func (enc *baseEncoder) encodeVersion(version int) { enc.encodeVarint(int64(version)) } +func (enc *baseEncoder) encodeObjectType(objType objectType) { enc.encodeVarint(int64(objType)) } +func (enc *baseEncoder) encodeNumObjectFields(numFields int) { enc.encodeArrayLen(numFields) } +func (enc *baseEncoder) encodeRawID(id id.RawID) { enc.encodeBytes([]byte(id)) } +func (enc *baseEncoder) encodeVarint(value int64) { enc.encodeVarintFn(value) } +func (enc *baseEncoder) encodeBool(value bool) { enc.encodeBoolFn(value) } +func (enc *baseEncoder) encodeFloat64(value float64) { enc.encodeFloat64Fn(value) } +func (enc *baseEncoder) encodeBytes(value []byte) { enc.encodeBytesFn(value) } +func (enc *baseEncoder) encodeBytesLen(value int) { enc.encodeBytesLenFn(value) } 
+func (enc *baseEncoder) encodeArrayLen(value int) { enc.encodeArrayLenFn(value) } +func (enc *baseEncoder) encodeStoragePolicy(p policy.StoragePolicy) { enc.encodeStoragePolicyFn(p) } +func (enc *baseEncoder) encodePolicy(p policy.Policy) { + enc.encodePolicyFn(p) +} func (enc *baseEncoder) reset(encoder BufferedEncoder) { enc.bufEncoder = encoder enc.encodeErr = nil @@ -89,6 +94,35 @@ func (enc *baseEncoder) encodeChunkedID(id id.ChunkedID) { func (enc *baseEncoder) encodePolicyInternal(p policy.Policy) { enc.encodeNumObjectFields(numFieldsForType(policyType)) + enc.encodeStoragePolicyFn(p.StoragePolicy()) + enc.encodeCompressedAggregationTypes(p.AggregationID()) +} + +func (enc *baseEncoder) encodeCompressedAggregationTypes(aggTypes policy.AggregationID) { + if aggTypes.IsDefault() { + enc.encodeNumObjectFields(numFieldsForType(defaultAggregationID)) + enc.encodeObjectType(defaultAggregationID) + return + } + + if policy.AggregationIDLen == 1 { + enc.encodeNumObjectFields(numFieldsForType(shortAggregationID)) + enc.encodeObjectType(shortAggregationID) + enc.encodeVarintFn(int64(aggTypes[0])) + return + } + + // NB(cw): Only reachable after we start to support more than 63 aggregation types + enc.encodeNumObjectFields(numFieldsForType(longAggregationID)) + enc.encodeObjectType(longAggregationID) + enc.encodeArrayLen(policy.AggregationIDLen) + for _, v := range aggTypes { + enc.encodeVarint(int64(v)) + } +} + +func (enc *baseEncoder) encodeStoragePolicyInternal(p policy.StoragePolicy) { + enc.encodeNumObjectFields(numFieldsForType(storagePolicyType)) enc.encodeResolution(p.Resolution()) enc.encodeRetention(p.Retention()) } diff --git a/protocol/msgpack/base_iterator.go b/protocol/msgpack/base_iterator.go index 842bf97..182c3ea 100644 --- a/protocol/msgpack/base_iterator.go +++ b/protocol/msgpack/base_iterator.go @@ -73,11 +73,57 @@ func (it *baseIterator) reader() bufReader { return it.bufReader } func (it *baseIterator) decodePolicy() policy.Policy { 
numExpectedFields, numActualFields, ok := it.checkNumFieldsForType(policyType) if !ok { - return policy.EmptyPolicy + return policy.DefaultPolicy + } + p := it.decodeStoragePolicy() + aggTypes := it.decodeCompressedAggregationTypes() + it.skip(numActualFields - numExpectedFields) + return policy.NewPolicy(p, aggTypes) +} + +func (it *baseIterator) decodeCompressedAggregationTypes() policy.AggregationID { + numActualFields := it.decodeNumObjectFields() + aggregationEncodeType := it.decodeObjectType() + numExpectedFields, ok := it.checkExpectedNumFieldsForType( + aggregationEncodeType, + numActualFields, + ) + if !ok { + return policy.DefaultAggregationID + } + + var aggTypes policy.AggregationID + switch aggregationEncodeType { + case defaultAggregationID: + case shortAggregationID: + value := it.decodeVarint() + aggTypes[0] = uint64(value) + case longAggregationID: + numValues := it.decodeArrayLen() + if numValues > policy.AggregationIDLen { + it.decodeErr = fmt.Errorf("invalid CompressedAggregationType length: %d", numValues) + return aggTypes + } + + for i := 0; i < numValues; i++ { + aggTypes[i] = uint64(it.decodeVarint()) + } + default: + it.decodeErr = fmt.Errorf("unrecognized aggregation encode type %v", aggregationEncodeType) + return aggTypes + } + it.skip(numActualFields - numExpectedFields) + return aggTypes +} + +func (it *baseIterator) decodeStoragePolicy() policy.StoragePolicy { + numExpectedFields, numActualFields, ok := it.checkNumFieldsForType(storagePolicyType) + if !ok { + return policy.DefaultStoragePolicy } resolution := it.decodeResolution() retention := it.decodeRetention() - p := policy.NewPolicy(resolution.Window, resolution.Precision, time.Duration(retention)) + p := policy.NewStoragePolicy(resolution.Window, resolution.Precision, time.Duration(retention)) it.skip(numActualFields - numExpectedFields) return p } diff --git a/protocol/msgpack/base_test.go b/protocol/msgpack/base_test.go new file mode 100644 index 0000000..0cbe506 --- 
/dev/null +++ b/protocol/msgpack/base_test.go @@ -0,0 +1,45 @@ +package msgpack + +import ( + "testing" + "time" + + "github.com/m3db/m3metrics/policy" + "github.com/m3db/m3x/time" + "github.com/stretchr/testify/require" +) + +func TestAggregationTypesRoundTrip(t *testing.T) { + inputs := []policy.AggregationID{ + policy.DefaultAggregationID, + policy.AggregationID{5}, + policy.AggregationID{100}, + policy.AggregationID{12345}, + } + + for _, input := range inputs { + enc := newBaseEncoder(NewBufferedEncoder()).(*baseEncoder) + it := newBaseIterator(enc.bufEncoder.Buffer(), 16).(*baseIterator) + + enc.encodeCompressedAggregationTypes(input) + r := it.decodeCompressedAggregationTypes() + require.Equal(t, input, r) + } +} + +func TestUnaggregatedPolicyRoundTrip(t *testing.T) { + inputs := []policy.Policy{ + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.AggregationID{8}), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.AggregationID{100}), + } + + for _, input := range inputs { + enc := newBaseEncoder(NewBufferedEncoder()).(*baseEncoder) + enc.encodePolicy(input) + + it := newBaseIterator(enc.bufEncoder.Buffer(), 16).(*baseIterator) + r := it.decodePolicy() + require.Equal(t, input, r) + } +} diff --git a/protocol/msgpack/raw_metric_test.go b/protocol/msgpack/raw_metric_test.go index b68cb75..86281a0 100644 --- a/protocol/msgpack/raw_metric_test.go +++ b/protocol/msgpack/raw_metric_test.go @@ -235,11 +235,13 @@ type mockBaseIterator struct { decodeFloat64Fn decodeFloat64Fn } -func (it *mockBaseIterator) reset(reader io.Reader) {} -func (it *mockBaseIterator) err() error { return it.itErr } -func (it *mockBaseIterator) setErr(err error) { it.itErr = err } -func (it *mockBaseIterator) reader() bufReader { return it.bufReader } -func (it *mockBaseIterator) 
decodePolicy() policy.Policy { return policy.EmptyPolicy } +func (it *mockBaseIterator) reset(reader io.Reader) {} +func (it *mockBaseIterator) err() error { return it.itErr } +func (it *mockBaseIterator) setErr(err error) { it.itErr = err } +func (it *mockBaseIterator) reader() bufReader { return it.bufReader } +func (it *mockBaseIterator) decodeStoragePolicy() policy.StoragePolicy { + return policy.DefaultStoragePolicy +} func (it *mockBaseIterator) decodeVersion() int { return it.decodeVersionFn() } func (it *mockBaseIterator) decodeObjectType() objectType { return unknownType } func (it *mockBaseIterator) decodeNumObjectFields() int { return 0 } @@ -251,6 +253,9 @@ func (it *mockBaseIterator) decodeBytes() []byte { return nil } func (it *mockBaseIterator) decodeBytesLen() int { return it.decodeBytesLenFn() } func (it *mockBaseIterator) decodeArrayLen() int { return 0 } func (it *mockBaseIterator) skip(numFields int) {} +func (it *mockBaseIterator) decodePolicy() policy.Policy { + return policy.DefaultPolicy +} func (it *mockBaseIterator) checkNumFieldsForType(objType objectType) (int, int, bool) { return 0, 0, true diff --git a/protocol/msgpack/schema.go b/protocol/msgpack/schema.go index 1c00dc2..a9e3b37 100644 --- a/protocol/msgpack/schema.go +++ b/protocol/msgpack/schema.go @@ -53,11 +53,15 @@ const ( defaultPoliciesListType customPoliciesListType stagedPoliciesType - policyType + storagePolicyType knownResolutionType unknownResolutionType knownRetentionType unknownRetentionType + defaultAggregationID + shortAggregationID + longAggregationID + policyType // Total number of object types. 
numObjectTypes = iota @@ -76,11 +80,15 @@ const ( numDefaultStagedPoliciesListFields = 1 numCustomStagedPoliciesListFields = 2 numStagedPoliciesFields = 3 - numPolicyFields = 2 + numStoragePolicyFields = 2 numKnownResolutionFields = 2 numUnknownResolutionFields = 3 numKnownRetentionFields = 2 numUnknownRetentionFields = 2 + numDefaultAggregationIDFields = 1 + numShortAggregationIDFields = 2 + numLongAggregationIDFields = 2 + numPolicyFields = 2 ) // NB(xichen): use a slice instead of a map to avoid lookup overhead. @@ -109,9 +117,13 @@ func init() { setNumFieldsForType(defaultPoliciesListType, numDefaultStagedPoliciesListFields) setNumFieldsForType(customPoliciesListType, numCustomStagedPoliciesListFields) setNumFieldsForType(stagedPoliciesType, numStagedPoliciesFields) - setNumFieldsForType(policyType, numPolicyFields) + setNumFieldsForType(storagePolicyType, numStoragePolicyFields) setNumFieldsForType(knownResolutionType, numKnownResolutionFields) setNumFieldsForType(unknownResolutionType, numUnknownResolutionFields) setNumFieldsForType(knownRetentionType, numKnownRetentionFields) setNumFieldsForType(unknownRetentionType, numKnownRetentionFields) + setNumFieldsForType(defaultAggregationID, numDefaultAggregationIDFields) + setNumFieldsForType(shortAggregationID, numShortAggregationIDFields) + setNumFieldsForType(longAggregationID, numLongAggregationIDFields) + setNumFieldsForType(policyType, numPolicyFields) } diff --git a/protocol/msgpack/types.go b/protocol/msgpack/types.go index e2db02f..e56cdf0 100644 --- a/protocol/msgpack/types.go +++ b/protocol/msgpack/types.go @@ -102,6 +102,9 @@ type encoderBase interface { // resetData resets the encoder data. resetData() + // encodeStoragePolicy encodes a storage policy. + encodeStoragePolicy(p policy.StoragePolicy) + // encodePolicy encodes a policy. encodePolicy(p policy.Policy) @@ -153,6 +156,9 @@ type iteratorBase interface { // reader returns the buffered reader. 
reader() bufReader + // decodeStoragePolicy decodes a storage policy. + decodeStoragePolicy() policy.StoragePolicy + // decodePolicy decodes a policy. decodePolicy() policy.Policy @@ -324,7 +330,7 @@ type AggregatedIterator interface { Next() bool // Value returns the current raw metric and the applicable policy. - Value() (aggregated.RawMetric, policy.Policy) + Value() (aggregated.RawMetric, policy.StoragePolicy) // Err returns the error encountered during decoding, if any. Err() error diff --git a/protocol/msgpack/unaggregated_encoder_test.go b/protocol/msgpack/unaggregated_encoder_test.go index 8799333..57fde4d 100644 --- a/protocol/msgpack/unaggregated_encoder_test.go +++ b/protocol/msgpack/unaggregated_encoder_test.go @@ -190,7 +190,7 @@ func TestUnaggregatedEncodeArrayLenError(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), } @@ -228,9 +228,36 @@ func testCapturingUnaggregatedEncoder(t *testing.T) (UnaggregatedEncoder, *[]int return encoder, result } -func expectedResultsForPolicy(t *testing.T, p policy.Policy) []interface{} { +func expectedResultsForUnaggregatedPolicy(t *testing.T, p policy.Policy) []interface{} { results := []interface{}{numFieldsForType(policyType)} + results = append(results, expectedResultsForPolicy(t, p.StoragePolicy())...) + return append(results, expectedResultsForCompressedAggregationTypes(t, p.AggregationID())...) 
+} + +func expectedResultsForCompressedAggregationTypes(t *testing.T, compressed policy.AggregationID) []interface{} { + results := []interface{}{} + + if compressed.IsDefault() { + return append(results, numFieldsForType(defaultAggregationID), int64(defaultAggregationID)) + } + + if len(compressed) == 1 { + return append(results, numFieldsForType(shortAggregationID), int64(shortAggregationID), int64(compressed[0])) + } + + results = append(results, numFieldsForType(longAggregationID), int64(longAggregationID), int64(len(compressed))) + + for _, code := range compressed { + results = append(results, code) + } + + return results +} + +func expectedResultsForPolicy(t *testing.T, p policy.StoragePolicy) []interface{} { + results := []interface{}{numFieldsForType(storagePolicyType)} + resolutionValue, err := policy.ValueFromResolution(p.Resolution()) if err == nil { results = append(results, []interface{}{ @@ -274,7 +301,7 @@ func expectedResultsForStagedPolicies(t *testing.T, sp policy.StagedPolicies) [] len(policies), } for _, p := range policies { - results = append(results, expectedResultsForPolicy(t, p)...) + results = append(results, expectedResultsForUnaggregatedPolicy(t, p)...) 
} return results } diff --git a/protocol/msgpack/unaggregated_iterator_test.go b/protocol/msgpack/unaggregated_iterator_test.go index e9384a5..be15bcf 100644 --- a/protocol/msgpack/unaggregated_iterator_test.go +++ b/protocol/msgpack/unaggregated_iterator_test.go @@ -449,7 +449,7 @@ func TestUnaggregatedIteratorDecodePolicyWithCustomResolution(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(3*time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(3*time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -471,7 +471,7 @@ func TestUnaggregatedIteratorDecodePolicyWithCustomRetention(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, 289*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 289*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -493,7 +493,7 @@ func TestUnaggregatedIteratorDecodePolicyMoreFieldsThanExpected(t *testing.T) { time.Now().UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -502,8 +502,8 @@ func TestUnaggregatedIteratorDecodePolicyMoreFieldsThanExpected(t *testing.T) { baseEncoder := enc.encoderBase.(*baseEncoder) // Pretend we added an extra int field to the policy object. 
- baseEncoder.encodePolicyFn = func(p policy.Policy) { - baseEncoder.encodeNumObjectFields(numFieldsForType(policyType) + 1) + baseEncoder.encodeStoragePolicyFn = func(p policy.StoragePolicy) { + baseEncoder.encodeNumObjectFields(numFieldsForType(storagePolicyType) + 1) baseEncoder.encodeResolution(p.Resolution()) baseEncoder.encodeRetention(p.Retention()) baseEncoder.encodeVarint(0) diff --git a/protocol/msgpack/unaggregated_roundtrip_test.go b/protocol/msgpack/unaggregated_roundtrip_test.go index f06b42e..d955995 100644 --- a/protocol/msgpack/unaggregated_roundtrip_test.go +++ b/protocol/msgpack/unaggregated_roundtrip_test.go @@ -61,9 +61,9 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(20*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), }, ), } @@ -73,16 +73,16 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(20*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( time.Now().Add(time.Minute).UnixNano(), true, []policy.Policy{ - 
policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), } @@ -92,7 +92,7 @@ var ( time.Now().UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Unit(100), time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Unit(100), time.Hour), policy.DefaultAggregationID), }, ), } @@ -122,9 +122,9 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(20*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.AggregationID{8}), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -137,7 +137,7 @@ var ( time.Now().UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -150,7 +150,7 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.AggregationID{100}), }, ), }, @@ -165,16 +165,16 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(20*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + 
policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( time.Now().Add(time.Minute).UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -186,14 +186,14 @@ var ( time.Now().UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( time.Now().Add(time.Hour).UnixNano(), false, []policy.Policy{ - policy.NewPolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -205,14 +205,14 @@ var ( time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 45*24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( time.Now().Add(time.Nanosecond).UnixNano(), false, []policy.Policy{ - policy.NewPolicy(5*time.Minute, xtime.Minute, 36*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 36*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -297,8 +297,8 @@ func TestUnaggregatedEncodeDecodeMetricWithPoliciesListStress(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + 
policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -307,16 +307,16 @@ func TestUnaggregatedEncodeDecodeMetricWithPoliciesListStress(t *testing.T) { time.Now().UnixNano(), false, []policy.Policy{ - policy.NewPolicy(20*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 2*24*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(20*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 2*24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 25*24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( time.Now().Add(time.Minute).UnixNano(), true, []policy.Policy{ - policy.NewPolicy(time.Second, xtime.Second, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( diff --git a/rules/mapping_test.go b/rules/mapping_test.go index 09b9351..4114375 100644 --- a/rules/mapping_test.go +++ b/rules/mapping_test.go @@ -45,12 +45,17 @@ var ( }, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_P999, }, }, }, @@ -65,21 +70,25 @@ var ( }, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - 
Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(5 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, }, @@ -113,7 +122,7 @@ func TestNewMappingRule(t *testing.T) { tombstoned: false, cutoverNanos: 12345, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedP999), }, }, { @@ -121,8 +130,8 @@ func TestNewMappingRule(t *testing.T) { tombstoned: true, cutoverNanos: 67890, policies: []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, }, } diff --git a/rules/result_test.go b/rules/result_test.go index a4c12f3..8090002 100644 --- a/rules/result_test.go +++ b/rules/result_test.go @@ -44,17 +44,17 @@ func TestMatchResult(t *testing.T) { 12345, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 
12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 23456, true, []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 10*time.Hour), - policy.NewPolicy(2*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), } @@ -66,17 +66,17 @@ func TestMatchResult(t *testing.T) { 12345, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 23456, false, []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 10*time.Hour), - policy.NewPolicy(2*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -120,8 +120,8 @@ func TestMatchResult(t *testing.T) { 23456, true, []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 10*time.Hour), - policy.NewPolicy(2*time.Minute, xtime.Minute, 48*time.Hour), + 
policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -133,8 +133,8 @@ func TestMatchResult(t *testing.T) { 23456, false, []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 10*time.Hour), - policy.NewPolicy(2*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 10*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(2*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, diff --git a/rules/rollup_test.go b/rules/rollup_test.go index a69017e..f7a1ef9 100644 --- a/rules/rollup_test.go +++ b/rules/rollup_test.go @@ -49,12 +49,14 @@ var ( Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, }, @@ -75,21 +77,28 @@ var ( Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(5 * time.Minute), - Precision: int64(time.Minute), + 
StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_MEAN, }, }, }, @@ -114,7 +123,7 @@ func TestNewRollupTargetNilPolicySchema(t *testing.T) { func TestRollupTargetSameTransform(t *testing.T) { policies := []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.DefaultAggregationID), } target := rollupTarget{Name: b("foo"), Tags: bs("bar1", "bar2")} inputs := []testRollupTargetData{ @@ -138,7 +147,7 @@ func TestRollupTargetSameTransform(t *testing.T) { func TestRollupTargetClone(t *testing.T) { policies := []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*24*time.Hour), policy.DefaultAggregationID), } target := rollupTarget{Name: b("foo"), Tags: bs("bar1", "bar2"), Policies: policies} cloned := target.clone() @@ -148,7 +157,7 @@ func TestRollupTargetClone(t *testing.T) { // Change references in the cloned object should not mutate the original object. 
cloned.Tags[0] = b("bar3") - cloned.Policies[0] = policy.EmptyPolicy + cloned.Policies[0] = policy.DefaultPolicy require.Equal(t, target.Tags, bs("bar1", "bar2")) require.Equal(t, target.Policies, policies) } @@ -183,7 +192,7 @@ func TestRollupRuleValidSchema(t *testing.T) { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -197,8 +206,8 @@ func TestRollupRuleValidSchema(t *testing.T) { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), compressedMean), }, }, }, diff --git a/rules/ruleset.go b/rules/ruleset.go index fb47ce0..0de3fc4 100644 --- a/rules/ruleset.go +++ b/rules/ruleset.go @@ -367,10 +367,8 @@ func (rs *ruleSet) ActiveSet(t time.Time) Matcher { // resolvePolicies resolves the conflicts among policies if any, following the rules below: // * If two policies have the same resolution but different retention, the one with longer // retention period is chosen. -// * If two policies have the same retention but different resolution, the policy with higher -// resolution is chosen. -// * If a policy has lower resolution and shorter retention than another policy, the policy -// is superseded by the other policy and therefore ignored. +// * If two policies have the same resolution but different custom aggregation types, the +// aggregation types will be merged. 
func resolvePolicies(policies []policy.Policy) []policy.Policy { if len(policies) == 0 { return policies @@ -382,16 +380,14 @@ func resolvePolicies(policies []policy.Policy) []policy.Policy { // If the policy has the same resolution, it must have either the same or shorter retention // period due to sorting, so we keep the one with longer retention period and ignore this // policy. - if policies[curr].Resolution().Window == policies[i].Resolution().Window { - continue - } - // Otherwise the policy has lower resolution, so if it has shorter or the same retention - // period, we keep the one with higher resolution and longer retention period and ignore - // this policy. - if policies[curr].Retention() >= policies[i].Retention() { + if policies[curr].StoragePolicy().Resolution().Window == policies[i].StoragePolicy().Resolution().Window { + if res, merged := policies[curr].AggregationID().Merge(policies[i].AggregationID()); merged { + // Merged custom aggregation functions to the current policy. + policies[curr] = policy.NewPolicy(policies[curr].StoragePolicy(), res) + } continue } - // Now we are guaranteed the policy has lower resolution and higher retention than the + // Now we are guaranteed the policy has lower resolution than the // current one, so we want to keep it. 
curr++ policies[curr] = policies[i] diff --git a/rules/ruleset_test.go b/rules/ruleset_test.go index 09a3d59..c50a90b 100644 --- a/rules/ruleset_test.go +++ b/rules/ruleset_test.go @@ -34,6 +34,16 @@ import ( "github.com/stretchr/testify/require" ) +var ( + compressor = policy.NewAggregationTypeCompressor() + compressedUpper, _ = compressor.Compress(policy.AggregationTypes{policy.Upper}) + compressedCount, _ = compressor.Compress(policy.AggregationTypes{policy.Count}) + compressedLower, _ = compressor.Compress(policy.AggregationTypes{policy.Lower}) + compressedMean, _ = compressor.Compress(policy.AggregationTypes{policy.Mean}) + compressedP999, _ = compressor.Compress(policy.AggregationTypes{policy.P999}) + compressedCountAndMean, _ = compressor.Compress(policy.AggregationTypes{policy.Count, policy.Mean}) +) + func TestActiveRuleSetMatchMappingRules(t *testing.T) { inputs := []testMappingsData{ { @@ -46,9 +56,10 @@ func TestActiveRuleSetMatchMappingRules(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -63,8 +74,9 @@ func TestActiveRuleSetMatchMappingRules(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), 
policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -79,7 +91,7 @@ func TestActiveRuleSetMatchMappingRules(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -101,32 +113,53 @@ func TestActiveRuleSetMatchMappingRules(t *testing.T) { 10000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedCount), + }, + ), + policy.NewStagedPolicies( + 15000, + false, + []policy.Policy{ + // different policies same resolution, merge aggregation types + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedCountAndMean), }, ), policy.NewStagedPolicies( 20000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), compressedMean), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + 
policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + }, + ), + policy.NewStagedPolicies( + 30000, + false, + []policy.Policy{ + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 34000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -142,7 +175,7 @@ func TestActiveRuleSetMatchMappingRules(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -180,9 +213,10 @@ func TestActiveRuleSetMatchRollupRules(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, 
xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -194,7 +228,7 @@ func TestActiveRuleSetMatchRollupRules(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -214,7 +248,7 @@ func TestActiveRuleSetMatchRollupRules(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -241,49 +275,63 @@ func TestActiveRuleSetMatchRollupRules(t *testing.T) { 10000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 20000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 22000, false, []policy.Policy{ - 
policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + }, + ), + policy.NewStagedPolicies( + 30000, + false, + []policy.Policy{ + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 34000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 35000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(45*time.Second, xtime.Second, 12*time.Hour), + 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( 38000, false, []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(45*time.Second, xtime.Second, 12*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -295,7 +343,7 @@ func TestActiveRuleSetMatchRollupRules(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), policy.NewStagedPolicies( @@ -382,9 +430,11 @@ func TestRuleSetActiveSet(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), compressedUpper), + // the aggregation type came in from policy merging + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), compressedLower), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, 
), }, @@ -399,8 +449,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -415,7 +466,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedP999), }, ), }, @@ -442,9 +493,10 @@ func TestRuleSetActiveSet(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -456,7 +508,7 @@ func TestRuleSetActiveSet(t *testing.T) { 22000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, ), }, @@ -476,7 +528,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - 
policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -505,8 +557,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -521,7 +574,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedP999), }, ), }, @@ -548,8 +601,9 @@ func TestRuleSetActiveSet(t *testing.T) { 35000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -569,7 +623,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -598,7 +652,9 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, 
xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -613,7 +669,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedP999), }, ), }, @@ -640,8 +696,9 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -653,7 +710,7 @@ func TestRuleSetActiveSet(t *testing.T) { 100000, false, []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -673,7 +730,7 @@ func TestRuleSetActiveSet(t *testing.T) { 24000, false, []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, ), }, @@ -736,7 +793,7 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 10000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), compressedCount), }, }, &mappingRuleSnapshot{ @@ -745,9 +802,9 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 20000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, }, &mappingRuleSnapshot{ @@ -756,7 +813,7 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 30000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -771,7 +828,7 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 15000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), compressedMean), }, }, &mappingRuleSnapshot{ @@ -780,8 +837,8 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 22000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, 
&mappingRuleSnapshot{ @@ -803,9 +860,9 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 22000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, }, &mappingRuleSnapshot{ @@ -814,8 +871,8 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 34000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, }, @@ -830,7 +887,7 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 24000, filter: filter2, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -845,7 +902,7 @@ func testMappingRules(t *testing.T) []*mappingRule { cutoverNanos: 100000, filter: filter1, policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -886,7 +943,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: 
[][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -901,9 +958,9 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 6*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), - policy.NewPolicy(10*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -918,7 +975,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(30*time.Second, xtime.Second, 6*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -939,7 +996,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -954,8 +1011,8 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + 
policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, }, @@ -970,7 +1027,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(45*time.Second, xtime.Second, 12*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(45*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -991,16 +1048,16 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 12*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, 24*time.Hour), - policy.NewPolicy(5*time.Minute, xtime.Minute, 48*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, 24*time.Hour), policy.DefaultAggregationID), + policy.NewPolicy(policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour), policy.DefaultAggregationID), }, }, { Name: b("rName2"), Tags: [][]byte{b("rtagName1")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 24*time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour), policy.DefaultAggregationID), }, }, }, @@ -1015,8 +1072,8 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName1"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(10*time.Second, xtime.Second, 2*time.Hour), - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(10*time.Second, xtime.Second, 2*time.Hour), policy.DefaultAggregationID), + 
policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, }, @@ -1044,7 +1101,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName3"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, }, @@ -1065,7 +1122,7 @@ func testRollupRules(t *testing.T) []*rollupRule { Name: b("rName3"), Tags: [][]byte{b("rtagName1"), b("rtagName2")}, Policies: []policy.Policy{ - policy.NewPolicy(time.Minute, xtime.Minute, time.Hour), + policy.NewPolicy(policy.NewStoragePolicy(time.Minute, xtime.Minute, time.Hour), policy.DefaultAggregationID), }, }, }, @@ -1114,12 +1171,14 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, }, @@ -1131,30 +1190,36 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(6 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(6 * time.Hour), + }, }, }, &schema.Policy{ - 
Resolution: &schema.Resolution{ - WindowSize: int64(5 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, }, @@ -1166,12 +1231,14 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(30 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(6 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(30 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(6 * time.Hour), + }, }, }, }, @@ -1188,12 +1255,14 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(12 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(12 * time.Hour), + }, }, }, }, @@ 
-1205,21 +1274,28 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(2 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(2 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_LOWER, }, }, }, @@ -1243,30 +1319,39 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(12 * time.Hour), + }, }, - Retention: &schema.Retention{ - Period: int64(12 * time.Hour), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_UPPER, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: 
int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(5 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, }, @@ -1278,21 +1363,25 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(2 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(2 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, }, }, @@ -1309,12 +1398,17 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: 
&schema.Retention{ + Period: int64(24 * time.Hour), + }, }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + AggregationTypes: []schema.AggregationType{ + schema.AggregationType_P999, }, }, }, @@ -1331,12 +1425,14 @@ func testMappingRulesConfig() []*schema.MappingRule { TagFilters: map[string]string{"mtagName1": "mtagValue1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, }, @@ -1365,12 +1461,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, }, @@ -1391,30 +1489,36 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(6 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(6 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(5 
* time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, }, @@ -1435,12 +1539,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(30 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(6 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(30 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(6 * time.Hour), + }, }, }, }, @@ -1466,12 +1572,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(12 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(12 * time.Hour), + }, }, }, }, @@ -1492,21 +1600,25 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: 
[]string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(2 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(2 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, }, }, @@ -1542,30 +1654,36 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(12 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(12 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(5 * time.Minute), - Precision: int64(time.Minute), - }, - Retention: 
&schema.Retention{ - Period: int64(48 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(5 * time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(48 * time.Hour), + }, }, }, }, @@ -1575,12 +1693,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(24 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(24 * time.Hour), + }, }, }, }, @@ -1601,21 +1721,25 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(10 * time.Second), - Precision: int64(time.Second), - }, - Retention: &schema.Retention{ - Period: int64(2 * time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(10 * time.Second), + Precision: int64(time.Second), + }, + Retention: &schema.Retention{ + Period: int64(2 * time.Hour), + }, }, }, &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, }, }, @@ -1640,12 +1764,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - 
WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, }, }, @@ -1671,12 +1797,14 @@ func testRollupRulesConfig() []*schema.RollupRule { Tags: []string{"rtagName1", "rtagName2"}, Policies: []*schema.Policy{ &schema.Policy{ - Resolution: &schema.Resolution{ - WindowSize: int64(time.Minute), - Precision: int64(time.Minute), - }, - Retention: &schema.Retention{ - Period: int64(time.Hour), + StoragePolicy: &schema.StoragePolicy{ + Resolution: &schema.Resolution{ + WindowSize: int64(time.Minute), + Precision: int64(time.Minute), + }, + Retention: &schema.Retention{ + Period: int64(time.Hour), + }, }, }, },